Example #1
def train_DA(epoch):
    net.train()
    params = list(net.parameters())
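    # NOTE: the optimizer is re-created on every call, so Adam's moment
    # estimates reset each epoch; hoisting it out of train_DA would preserve them.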
    optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                        net.parameters()),
                                 lr=lr)
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)

    train_loss = 0
    data_loader = ImageDataLoader(train_path,
                                  train_gt_path,
                                  shuffle=True,
                                  gt_downsample=True,
                                  pre_load=False)
    best_mae = sys.maxsize

    step = -1
    gt_count = 0
    et_count = 0
    for blob in data_loader:
        step = step + 1
        im_data = blob['data']
        gt_data = blob['gt_density']
        dtype = torch.FloatTensor

        # certified input
        im_data = torch.from_numpy(im_data).type(dtype)
        im_data = im_data.to(device)
        im_data = random_mask_batch_one_sample(im_data, keep, reuse_noise=True)
        im_data = Variable(im_data)

        gt_data = torch.from_numpy(gt_data).type(dtype)
        gt_data = gt_data.to(device)
        gt_data = Variable(gt_data)

        density_map = net(im_data, gt_data)
        zzk_loss = net.loss
        train_loss += zzk_loss.item()

        gt_data = gt_data.data.detach().cpu().numpy()
        gt_count = np.sum(gt_data)
        density_map = density_map.data.detach().cpu().numpy()
        et_count = np.sum(density_map)
        print("gt_count: ", gt_count)
        print("et_count: ", et_count)

        optimizer.zero_grad()
        zzk_loss.backward()
        optimizer.step()

    train_loss = train_loss / data_loader.get_num_samples()

    if epoch % 100 == 0:
        save_name = os.path.join(
            output_dir, '{}_{}_{}.h5'.format(method, dataset_name, epoch))
        network.save_net(save_name, net)
    return train_loss
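
A minimal driver for train_DA (a sketch; the epoch range is an assumption, and the net/optimizer setup follows the snippet as-is):

for epoch in range(0, 100):
    loss = train_DA(epoch)
    print('epoch: %d, train_loss: %.6f' % (epoch, loss))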
Example #2
def eval_test(net, test_path):
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = False 

    output_dir = './output'
    model_path = args.Dataset + '_trained_model.h5'
    model_name = os.path.basename(model_path).split('.')[0]

    if not os.path.exists(output_dir):
        os.mkdir(output_dir)
    output_dir = os.path.join(output_dir, 'dm_' + model_name)
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)


    trained_model = os.path.join(model_path)
    network.load_net(trained_model, net)
    net.cuda()
    net.eval()

    test_loader = ImageDataLoader(test_path, None, 'test_split', shuffle=False, gt_downsample=True, pre_load=True, Dataset=args.Dataset)

    for blob in test_loader:                        
        im_data = blob['data']
        net.training = False
        density_map = net(im_data)
        density_map = density_map.data.cpu().numpy()
        new_dm = density_map.reshape([density_map.shape[2], density_map.shape[3]])
        np.savetxt(os.path.join(output_dir, 'output_' + blob['fname'].split('.')[0] + '.csv'), new_dm, delimiter=',', fmt='%.6f')
   
    return net
Example #3
def reloadData():
    data_loader_res = ImageDataLoader(data_path_res,
                                      gt_path,
                                      shuffle=False,
                                      gt_downsample=True,
                                      pre_load=True,
                                      no_gt=True)
    data_loader_out = ImageDataLoader(data_path_out,
                                      gt_path,
                                      shuffle=False,
                                      gt_downsample=True,
                                      pre_load=True,
                                      no_gt=True)
    data_loader_class = ImageDataLoader(data_path_class,
                                        gt_path,
                                        shuffle=False,
                                        gt_downsample=True,
                                        pre_load=True,
                                        no_gt=True)
    return data_loader_res, data_loader_out, data_loader_class
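
A one-line usage sketch (assuming the data_path_* variables and gt_path are defined at module scope, as in the snippet):

data_loader_res, data_loader_out, data_loader_class = reloadData()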
Example #4
# TensorBoard config
use_tensorboard = False
save_exp_name = method + '_' + dataset_name + '_' + 'v1'
remove_all_log = False  # remove all historical experiments in TensorBoard
exp_name = None  # the previous experiment name in TensorBoard

rand_seed = 64678
if rand_seed is not None:
    np.random.seed(rand_seed)
    torch.manual_seed(rand_seed)
    torch.cuda.manual_seed(rand_seed)

# load training and validation data
data_loader = ImageDataLoader(train_path,
                              train_gt_path,
                              shuffle=True,
                              gt_downsample=False,
                              pre_load=True)
class_wts = data_loader.get_classifier_weights()
data_loader_val = ImageDataLoader(val_path,
                                  val_gt_path,
                                  shuffle=False,
                                  gt_downsample=False,
                                  pre_load=True)

#load net and initialize it
net = CrowdCounter(ce_weights=class_wts)
network.weights_normal_init(net, dev=0.01)
net.cuda()
net.train()
Example #5
    if exp_name is None:
        exp_name = save_exp_name
        exp = cc.create_experiment(exp_name)
    else:
        exp = cc.open_experiment(exp_name)

# training
train_loss = 0
step_cnt = 0
re_cnt = False
t = Timer()
t.tic()

data_loader = ImageDataLoader(train_path,
                              train_gt_path,
                              shuffle=True,
                              gt_downsample=True,
                              pre_load=True)
data_loader_val = ImageDataLoader(val_path,
                                  val_gt_path,
                                  shuffle=False,
                                  gt_downsample=True,
                                  pre_load=True)
best_mae = sys.maxsize

for epoch in range(start_step, end_step + 1):
    step = -1
    train_loss = 0
    for blob in list(data_loader):
        step = step + 1
        im_data = blob['data']
Example #6
    torch.manual_seed(rand_seed)
    torch.cuda.manual_seed_all(rand_seed)

### initialize network
net = CrowdCounter(model=model, pool=pool)
network.weights_normal_init(net, dev=0.01)
net.cuda()
net.train()

### optimizer
params = list(net.parameters())
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, net.parameters()), lr=lr)

### load data
pre_load = True
data_loader = ImageDataLoader(train_path, train_gt_path, shuffle=True, gt_downsample=True, pre_load=pre_load,
                              batch_size=batch_size, scaling=scaling)
data_loader_val = ImageDataLoader(val_path, val_gt_path, shuffle=False, gt_downsample=True, pre_load=pre_load,
                                  batch_size=1, scaling=scaling)

### training
train_loss = 0
t = Timer()
t.tic()
best_mae = sys.maxsize

for epoch in range(start_step, end_step+1):    
    step = 0
    train_loss = 0
    for blob in data_loader:                
        step = step + 1    
        im_data = blob['data']
Example #7
def test_DA(epoch):
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = False

    data_path_DA = '../ShanghaiTech/part_A_final/test_data/images/'
    gt_path_DA = '../ShanghaiTech/part_A_final/test_data/after_ground_truth/'

    net.to(device)
    net.eval()
    mae = 0.0
    mse = 0.0

    data_loader_DA = ImageDataLoader(data_path_DA,
                                     gt_path_DA,
                                     shuffle=False,
                                     gt_downsample=True,
                                     pre_load=False)

    # save result images
    if not os.path.exists('./results_DA_ablated'):
        os.mkdir('./results_DA_ablated')
    if not os.path.exists('./results_DA_ablated/density_map_adv'):
        os.mkdir('./results_DA_ablated/density_map_adv')
    if not os.path.exists('./results_DA_ablated/images_adv'):
        os.mkdir('./results_DA_ablated/images_adv')
    if not os.path.exists('./results_DA_ablated/images_gt'):
        os.mkdir('./results_DA_ablated/images_gt')

    correct = 0
    total = 0
    dtype = torch.FloatTensor

    # ablated test
    for blob in data_loader_DA:
        im_data = blob['data']
        gt_data = blob['gt_density']
        full_imgname = blob['fname']

        # certified input
        im_data = torch.from_numpy(im_data).type(dtype)
        im_data = im_data.to(device)
        im_data = random_mask_batch_one_sample(im_data, keep, reuse_noise=True)
        im_data = Variable(im_data)

        gt_data = torch.from_numpy(gt_data).type(dtype)
        gt_data = gt_data.to(device)
        gt_data = Variable(gt_data)

        density_map = net(im_data, gt_data)

        density_map = density_map.data.cpu().numpy()
        im_data = im_data.data.cpu().numpy()
        gt_data = gt_data.data.cpu().numpy()

        tgt_img = gt_data[0][0]
        plt.imsave(
            './results_DA_ablated/images_gt/IMG_{}.png'.format(full_imgname),
            tgt_img,
            format='png',
            cmap='gray')

        adv_tgt_img = im_data[0][0]
        plt.imsave(
            './results_DA_ablated/images_adv/IMG_{}.png'.format(full_imgname),
            adv_tgt_img,
            format='png',
            cmap=plt.cm.jet)

        adv_out = density_map[0][0]
        plt.imsave('./results_DA_ablated/density_map_adv/IMG_{}.png'.format(
            full_imgname),
                   adv_out,
                   format='png',
                   cmap='gray')

        et_count = np.sum(density_map)
        gt_count = np.sum(gt_data)

        bias = abs(et_count - gt_count)

        mae += abs(gt_count - et_count)
        mse += ((gt_count - et_count) * (gt_count - et_count))

        if bias < 10:
            correct += 1
        total += 1

    accuracy = (correct / total) * 100.0
    print("correct: ", correct)
    print("total: ", total)
    mae = mae / data_loader_DA.get_num_samples()
    mse = np.sqrt(mse / data_loader_DA.get_num_samples())
    print("test_ablated_results: ")
    print('\nMAE: %0.2f, MSE: %0.2f' % (mae, mse))
    print("test_ablated_accuracy: ", accuracy)

    # save result images
    if not os.path.exists('./results_DA_normal'):
        os.mkdir('./results_DA_normal')
    if not os.path.exists('./results_DA_normal/density_map_adv'):
        os.mkdir('./results_DA_normal/density_map_adv')
    if not os.path.exists('./results_DA_normal/images_gt'):
        os.mkdir('./results_DA_normal/images_gt')

    total = 0
    correct = 0
    mae = 0.0
    mse = 0.0

    for blob in data_loader_DA:
        im_data = blob['data']
        gt_data = blob['gt_density']
        full_imgname = blob['fname']
        tgt_img = gt_data[0][0]
        plt.imsave('./results_DA_normal/images_gt/{}'.format(full_imgname),
                   tgt_img,
                   format='png',
                   cmap='gray')

        im_data = torch.from_numpy(im_data).type(dtype)
        im_data = im_data.to(device)

        gt_data = torch.from_numpy(gt_data).type(dtype)
        gt_data = gt_data.to(device)
        gt_data = Variable(gt_data)

        density_map = net(im_data, gt_data)

        density_map = density_map.data.detach().cpu().numpy()
        gt_data = gt_data.data.detach().cpu().numpy()

        adv_out = density_map[0][0]
        plt.imsave(
            './results_DA_normal/density_map_adv/{}'.format(full_imgname),
            adv_out,
            format='png',
            cmap='gray')

        gt_count = np.sum(gt_data)
        et_count = np.sum(density_map)

        bias = abs(gt_count - et_count)

        mae += abs(gt_count - et_count)
        mse += (gt_count - et_count) * (gt_count - et_count)

        # !!! The exact criterion for counting a prediction as correct is TBD !!!
        if bias < 10:
            correct += 1
        total += 1

    accuracy = (correct / total) * 100.0
    print("correct: ", correct)
    print("total: ", total)
    mae = mae / data_loader_DA.get_num_samples()
    mse = np.sqrt(mse / data_loader_DA.get_num_samples())
    print("test_normal_result: ")
    print('\nMAE: %0.2f, MSE: %0.2f' % (mae, mse))
    print("normal_test_accuracy: ", accuracy)
Example #8
    def test_load_data(self):
        dicoms, targets = ImageDataLoader('test_data/link.csv', 'test_data/dicoms/', 'test_data/contourfiles/').load_data()
        self.assertEqual(len(dicoms), 4, "Should be 4")
        self.assertEqual(len(targets), 4, "Should be 4")
        self.assertEqual(dicoms[2].shape, (256, 256), "Should be (256, 256)")
        self.assertEqual(targets[2].shape, (256, 256), "Should be (256, 256)")
Example #9
def main(args):

    dicoms, icontours, ocontours = ImageDataLoader(
        args.links_filename, args.dicom_path, args.contour_path).load_data()
    #dataset = generate_batches_for_training(args.epochs, args.batch_size, args.buffer_size, dicoms, targets)
    visualize_images(dicoms, icontours, ocontours)
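
A plausible CLI entry point for this main (the argparse wiring is an assumption; flag names mirror the attributes used above, and the defaults reuse the paths from Example #8):

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--links_filename', default='test_data/link.csv')
    parser.add_argument('--dicom_path', default='test_data/dicoms/')
    parser.add_argument('--contour_path', default='test_data/contourfiles/')
    main(parser.parse_args())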
Example #10
import tensorflow as tf
import numpy as np
from src.network import LoadNet
from src.data_loader import ImageDataLoader

config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)

meta_path = './models/crowncnn-14.meta'
model_path = './models/'
val_path = '/local/share/DeepLearning/crowdcount-mcnn/data/formatted_trainval/shanghaitech_part_A_patches_9/val'
val_gt_path = '/local/share/DeepLearning/crowdcount-mcnn/data/formatted_trainval/shanghaitech_part_A_patches_9/val_den'

val_loader = ImageDataLoader(val_path, val_gt_path, shuffle=True, gt_downsample=True, pre_load=True)

ln = LoadNet(meta_path)
graph, sess = ln.load_net(sess, model_path)

im_data = graph.get_tensor_by_name('im_data:0')
density_op = graph.get_collection('density_op')[0]

for blob in val_loader:
	data = blob['data']
	den = blob['den']
	pred_data = sess.run(density_op, feed_dict={im_data: data})

	gt_count = np.sum(den)
	pred_count = np.sum(pred_data)
	print('gt_count is {}, pred_count is {}'.format(gt_count, pred_count))
Example #11
network.load_net(trained_model, net)
net.cuda()
net.eval()
mae = 0.0
mse = 0.0
mape = 0.0
u = 0.0
v = 0.0

y_true = []
y_pred = []
y_diff = []
# load test data
data_loader = ImageDataLoader(data_path,
                              gt_path,
                              shuffle=False,
                              gt_downsample=True,
                              pre_load=True)
precise = 0.0
recall = 0.0

for blob in data_loader:
    im_data = blob['data']
    gt_data = blob['gt_density']
    density_map = net(im_data, gt_data)
    density_map = density_map.data.cpu().numpy()
    gt_count = np.sum(gt_data)
    et_count = np.sum(density_map)
    y_true.append(gt_count)
    y_pred.append(et_count)
    y_diff.append(gt_count - et_count)
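
# The snippet stops before the collected lists are reduced; a sketch of the
# final metrics, following the MAE/RMSE convention of Example #7 (the MAPE
# epsilon guard is an assumption):
y_true = np.asarray(y_true)
y_pred = np.asarray(y_pred)
mae = np.mean(np.abs(y_true - y_pred))
mse = np.sqrt(np.mean((y_true - y_pred) ** 2))  # reported as MSE, computed as RMSE
mape = np.mean(np.abs(y_true - y_pred) / np.maximum(y_true, 1e-6)) * 100.0
print('MAE: %0.2f, MSE: %0.2f, MAPE: %0.2f%%' % (mae, mse, mape))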
Example #12
net = CrowdCounter(model, pool)
trained_model = os.path.join(model_path)
network.load_net(trained_model, net)
net.cuda()
net.eval()

if model in ['base', 'wide']:
    scaling = 4
elif model == 'deep':
    scaling = 8

#load test data
data_loader = ImageDataLoader(data_path,
                              gt_path,
                              shuffle=False,
                              gt_downsample=True,
                              pre_load=False,
                              batch_size=1,
                              scaling=scaling)

mae = 0.0
mse = 0.0
num = 0
for blob in data_loader:
    num += 1
    im_data = blob['data']
    gt_data = blob['gt_density']
    density_map = net(im_data)
    density_map = density_map.data.cpu().numpy()
    gt_count = np.sum(gt_data)
    et_count = np.sum(density_map)
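    # The snippet is truncated here; the usual continuation, following the
    # accumulation pattern of Example #7 (a sketch, not from the original):
    mae += abs(gt_count - et_count)
    mse += (gt_count - et_count) ** 2

# final metrics over all num samples (RMSE reported as MSE, as elsewhere)
mae = mae / num
mse = np.sqrt(mse / num)
print('MAE: %0.2f, MSE: %0.2f' % (mae, mse))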
Example #13
def main():
    # define output folder
    output_dir = './saved_models/'
    log_dir = './mae_mse/'
    checkpoint_dir = './checkpoint/'

    train_path = '/home/jake/Desktop/Projects/Python/dataset/SH_B/cooked/train/images'
    train_gt_path = '/home/jake/Desktop/Projects/Python/dataset/SH_B/cooked/train/ground_truth'
    val_path = '/home/jake/Desktop/Projects/Python/dataset/SH_B/cooked/val/images'
    val_gt_path = '/home/jake/Desktop/Projects/Python/dataset/SH_B/cooked/val/ground_truth'

    # last checkpoint
    checkpointfile = os.path.join(checkpoint_dir, 'checkpoint.94.pth.tar')

    # some description
    method = 'mcnn'
    dataset_name = 'SH_B'

    # log file
    f_train_loss = open(os.path.join(log_dir, "train_loss.csv"), "a+")
    f_val_loss = open(os.path.join(log_dir, "val_loss.csv"), "a+")

    # Training configuration
    start_epoch = 0
    end_epoch = 97
    lr = 0.00001
    # momentum = 0.9
    disp_interval = 1000
    # log_interval = 250

    # Flag
    CONTINUE_TRAIN = True
    # Tensorboard  config

    # use_tensorboard = False
    # save_exp_name = method + '_' + dataset_name + '_' + 'v1'
    # remove_all_log = False   # remove all historical experiments in TensorBoard
    # exp_name = None # the previous experiment name in TensorBoard

    # -----------------------------------------------------------------------------------------
    rand_seed = 64678
    if rand_seed is not None:
        np.random.seed(rand_seed)
        torch.manual_seed(rand_seed)
        torch.cuda.manual_seed(rand_seed)

    # Define network
    net = CrowdCounter()
    network.weights_normal_init(net, dev=0.01)
    # net.cuda()
    net.train()
    # params = list(net.parameters())
    optimizer = torch.optim.Adam(
        filter(lambda p: p.requires_grad, net.parameters()), lr=lr)

    if not os.path.exists(output_dir):
        os.mkdir(output_dir)

    # # tensorboad
    # use_tensorboard = use_tensorboard and CrayonClient is not None
    # if use_tensorboard:
    #     cc = CrayonClient(hostname='127.0.0.1')
    #     if remove_all_log:
    #         cc.remove_all_experiments()
    #     if exp_name is None:
    #         exp_name = save_exp_name
    #         exp = cc.create_experiment(exp_name)
    #     else:
    #         exp = cc.open_experiment(exp_name)

    # training param

    if CONTINUE_TRAIN:
        net, optimizer, start_epoch = utils.load_checkpoint(
            net, optimizer, filename=checkpointfile)

    train_loss = 0
    step_cnt = 0
    re_cnt = False
    t = Timer()
    t.tic()

    # Load data
    data_loader = ImageDataLoader(
        train_path, train_gt_path, shuffle=True, gt_downsample=True, pre_load=True)
    data_loader_val = ImageDataLoader(
        val_path, val_gt_path, shuffle=False, gt_downsample=True, pre_load=True)
    best_mae = sys.maxsize

    # Start training

    for this_epoch in range(start_epoch, end_epoch + 1):
        step = -1
        train_loss = 0
        for blob in data_loader:
            step += 1
            img_data = blob['data']
            gt_data = blob['gt_density']
            et_data = net(img_data, gt_data)
            loss = net.loss
            train_loss += loss.item()
            step_cnt += 1
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if step % disp_interval == 0:
                duration = t.toc(average=False)
                fps = step_cnt / duration
                gt_count = np.sum(gt_data)
                et_data = et_data.data.cpu().numpy()
                et_count = np.sum(et_data)
                utils.save_results(img_data, gt_data, et_data, output_dir,
                                   fname="{}.{}.png".format(this_epoch, step))
                log_text = 'epoch: %4d, step %4d, Time: %.4fs, gt_cnt: %4.1f, et_cnt: %4.1f' % (this_epoch,
                                                                                                step, 1./fps, gt_count, et_count)
                log_print(log_text, color='green', attrs=['bold'])
                re_cnt = True

            if re_cnt:
                t.tic()
                re_cnt = False

        # Save checkpoint
        state = {'epoch': this_epoch, 'state_dict': net.state_dict(),
                 'optimizer': optimizer.state_dict()}
        cp_filename = "checkpoint.{}.pth.tar".format(this_epoch)
        torch.save(state, os.path.join(checkpoint_dir, cp_filename))
# ========================== END 1 EPOCH==================================================================================
        train_mae, train_mse = evaluate_network(net, data_loader)
        f_train_loss.write("{},{}\n".format(train_mae, train_mse))
        log_text = 'TRAINING - EPOCH: %d, MAE: %.1f, MSE: %0.1f' % (
            this_epoch, train_mae, train_mse)
        log_print(log_text, color='green', attrs=['bold'])
# =====================================================VALIDATION=========================================================
        # calculate error on the validation dataset
        val_mae, val_mse = evaluate_network(net, data_loader_val)
        f_val_loss.write("{},{}\n".format(val_mae, val_mse))
        log_text = 'VALIDATION - EPOCH: %d, MAE: %.1f, MSE: %0.1f' % (
            this_epoch, val_mae, val_mse)
        log_print(log_text, color='green', attrs=['bold'])
        # SAVE model
        is_save = False
        if val_mae < best_mae:
            is_save = True
            best_mae = val_mae
            best_mse = val_mse
        elif val_mae == best_mae and val_mse < best_mse:
            is_save = True
            best_mse = val_mse

        if is_save:
            save_name = os.path.join(output_dir, '{}_{}_{}.h5'.format(
                method, dataset_name, this_epoch))
            network.save_net(save_name, net)
            best_model = '{}_{}_{}.h5'.format(method, dataset_name, this_epoch)
            log_text = 'BEST MAE: %0.1f, BEST MSE: %0.1f, BEST MODEL: %s' % (
                best_mae, best_mse, best_model)
            log_print(log_text, color='green', attrs=['bold'])

        # if use_tensorboard:
        #     exp.add_scalar_value('MAE', mae, step=epoch)
        #     exp.add_scalar_value('MSE', mse, step=epoch)
        #     exp.add_scalar_value('train_loss', train_loss /
        #                          data_loader.get_num_samples(), step=epoch)

    f_train_loss.close()
    f_val_loss.close()
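
A standard entry-point guard (not shown in the snippet) would invoke main():

if __name__ == '__main__':
    main()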
Example #14
        exp_name = save_exp_name
        exp = cc.create_experiment(exp_name)
    else:
        exp = cc.open_experiment(exp_name)

# training
train_loss = 0
step_cnt = 0
re_cnt = False
t = Timer()
t.tic()

# downsample = True
data_loader = ImageDataLoader(train_path,
                              train_gt_path,
                              batch_size=BATCH_SIZE,
                              shuffle=True,
                              gt_downsample=True,
                              pre_load=True)
# data_loader_val = ImageDataLoader(val_path, val_gt_path, shuffle=False, gt_downsample=True, pre_load=True)
data_loader_test = TestImageDataLoader(test_path,
                                       test_gt_path,
                                       shuffle=False,
                                       gt_downsample=True,
                                       pre_load=True)

# downsample = False
# data_loader = ImageDataLoader(train_path, train_gt_path, shuffle=True, gt_downsample=False, pre_load=True)
# data_loader_val = ImageDataLoader(val_path, val_gt_path, shuffle=False, gt_downsample=True, pre_load=True)
# data_loader_test = ImageDataLoader(test_path, test_gt_path, shuffle=False, gt_downsample=False, pre_load=True)

best_mae = sys.maxsize

net.cuda()
net.train()

params = list(net.parameters())
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, net.parameters()), lr=args.learning_rate)

if not os.path.exists(args.SAVE_ROOT):
    os.mkdir(args.SAVE_ROOT)

# start training
# train, validation/self training and testing model

if args.MODE == 'all' or args.MODE == 'train':
    data_loader_train = ImageDataLoader(train_path, train_gt_path, 'train_split', shuffle=False, gt_downsample=True, pre_load=True, Dataset=args.Dataset)
    net = train(net, data_loader_train, optimizer, args.MAX_EPOCHS)
    network.save_net(args.SAVE_ROOT + '/' + args.Dataset + '_trained_model.h5', net)


if args.MODE == 'all' or args.MODE == 'val':
    net = val(net, val_path, optimizer, args.VAL_EPOCHS, Dataset=args.Dataset)
    network.save_net(args.SAVE_ROOT + '/' + args.Dataset + '_self_trained_model_val.h5', net)


if args.MODE == 'all' or args.MODE == 'test':
    net = test(net, test_path, optimizer, args.VAL_EPOCHS, Dataset=args.Dataset)
    network.save_net(args.SAVE_ROOT + '/' + args.Dataset + '_self_trained_model_test.h5', net)

#if args.MODE == 'eval_all' or args.MODE == 'eval_val':
#    eval_val(net, val_path)
Example #16
if not os.path.exists(save_dir):
    os.mkdir(save_dir)



net = CrowdCounter()
      
trained_model = os.path.join(model_path)
network.load_net(trained_model, net)
#net.cuda()
net.eval()
mae = 0.0
mse = 0.0

#load test data
data_loader = ImageDataLoader(data_path, gt_path, shuffle=False, gt_downsample=False, pre_load=True)


for blob in data_loader:                        
    im_data = blob['data']
    #gt_data = blob['gt_density']
    gt_data = None
    density_map = net(im_data, gt_data)
    density_map = density_map.data.cpu().numpy()
    #gt_count = np.sum(gt_data)
    gt_count = 0
    et_count = np.sum(density_map)
    print(et_count)
    mae += abs(gt_count-et_count)
    mse += ((gt_count-et_count)*(gt_count-et_count))
    if vis:
        utils.display_results(im_data, density_map)
Example #17
def test(net, test_path, optimizer, num_epochs, Dataset=args.Dataset):

    if Dataset=="fdst":
      num_sessions=3
      test_len=750
      low_limit=451
      high_limit=750
    else:
        num_sessions=8
        test_len=2000
        low_limit=1201
        high_limit=2000
    #print(num_sessions)


    sessions_list = []
    ses_size = 100

    for i in range(low_limit, high_limit, ses_size):
        sessions_list.append(i)
    sessions_list.append(test_len)
    #print("test list: ", sessions_list)
    for test_inc in range(len(sessions_list)-1):
        start_frame = sessions_list[test_inc]
        end_frame = sessions_list[test_inc+1]
        #print('start:,end:', (start_frame,end_frame))

        test_loader = ImageDataLoader_Val_Test(test_path, None,'test_split',start_frame, end_frame, shuffle=False, gt_downsample=True, pre_load=True, Dataset=args.Dataset)
        log_file = open(args.SAVE_ROOT+"/"+args.Dataset+"_test.log","w",1)
        log_print("test/Self Training ....", color='green', attrs=['bold'])
        # training
        train_loss = 0
        step_cnt = 0
        re_cnt = False
        t = Timer()
        t.tic()
        for epoch in range(1,num_epochs+1):
            step = -1
            train_loss = 0
            for blob in test_loader:                
                step = step + 1        
                im_data = blob['data']
                net.training = False
                gt_data = net(im_data)
                gt_data = gt_data.cpu().detach().numpy()
                net.training = True
                density_map = net(im_data, gt_data)
                loss = net.loss
                train_loss += loss.item()
                step_cnt += 1
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
            
                if step % disp_interval == 0:
                    duration = t.toc(average=False)
                    fps = step_cnt / duration
                    gt_count = np.sum(gt_data)
                    density_map = density_map.data.cpu().numpy()
                    et_count = np.sum(density_map)
                    utils.save_results(im_data, gt_data, density_map, args.SAVE_ROOT)
                    log_text = 'epoch: %4d, step %4d, Time: %.4fs, gt_cnt: %4.1f, et_cnt: %4.1f' % (
                        epoch, step, 1. / fps, gt_count, et_count)
                    log_print(log_text, color='green', attrs=['bold'])
                    re_cnt = True
                if re_cnt:
                    t.tic()
                    re_cnt = False

        torch.backends.cudnn.enabled = True
        torch.backends.cudnn.benchmark = False 

        session = str(sessions_list[test_inc])
        network.save_net(args.SAVE_ROOT + '/' + args.Dataset + session + '_self_trained_model_test.h5', net)
        output_dir = './densitymaps/' + args.Dataset + session
        net.cuda()
        net.eval()

        all_test_loader = ImageDataLoader(test_path, None, 'test_split', shuffle=False, gt_downsample=True, pre_load=True , Dataset=args.Dataset)

        for blob in all_test_loader:                        
            im_data = blob['data']
            net.training = False
            density_map = net(im_data)
            density_map = density_map.data.cpu().numpy()
            new_dm = density_map.reshape([density_map.shape[2], density_map.shape[3]])
            
            np.savetxt(output_dir + '_output_' + blob['fname'].split('.')[0] +'.csv', new_dm, delimiter=',', fmt='%.6f')

    return net
Example #18
    if remove_all_log:
        cc.remove_all_experiments()
    if exp_name is None:    
        exp_name = save_exp_name 
        exp = cc.create_experiment(exp_name)
    else:
        exp = cc.open_experiment(exp_name)

# training
train_loss = 0
step_cnt = 0
re_cnt = False
t = Timer()
t.tic()

data_loader = ImageDataLoader(train_path, train_gt_path, shuffle=True, gt_downsample=True, pre_load=True)
data_loader_val = ImageDataLoader(val_path, val_gt_path, shuffle=False, gt_downsample=True, pre_load=True)
best_mae = sys.maxsize

for epoch in range(start_step, end_step+1):    
    step = -1
    train_loss = 0
    for blob in data_loader:
        step = step + 1        
        im_data = blob['data']
        gt_data = blob['gt_density']
        density_map = net(im_data, gt_data)
        loss = net.loss
        train_loss += loss.item()
        step_cnt += 1
        optimizer.zero_grad()
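        # The snippet is truncated here; every other training loop in these
        # examples continues with the backward pass and update step:
        loss.backward()
        optimizer.step()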
Example #19
    os.mkdir(output_dir)

net = CrowdCounter()

trained_model = os.path.join(model_path)
network.load_net(trained_model, net)
net.cuda()
net.eval()
mae = 0.0
rmse = 0.0
mrmse = 0.0

# load test data
data_loader = ImageDataLoader(data_path,
                              gt_path,
                              shuffle=False,
                              gt_downsample=True,
                              pre_load=True)

for blob in data_loader:
    im_data = blob['data']
    gt_data = blob['gt_density']
    # print('gt_data: ',gt_data)
    density_map = net(im_data, gt_data)
    density_map = density_map.data.cpu().numpy()
    # print('density_map: ',density_map)
    gt_count = np.sum(gt_data)
    # print('gt_count: ',gt_count)
    et_count = np.sum(density_map)
    # print('et_count: ',et_count)
    mae += abs(gt_count - et_count)
Example #20
def testimage(modelname, camname):
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = False
    vis = False
    save_output = False

    #test data and model file path
    if camname == 0:
        data_path = '../data/test/images/'
    else:
        data_path = '../data/test/images2/'

    if modelname == 'A':
        model_path = './final_models/cmtl_shtechA_204.h5'
    else:
        model_path = './final_models/cmtl_shtechB_768.h5'
    print("Model name:", modelname, " Camname: ", camname)
    gt_flag = False
    if gt_flag:
        gt_path = '../dataset/ShanghaiTech/part_A/test_data/ground_truth/'

    # =============================================================================
    # for i in range(1, 4):
    #     gt_name = os.path.join(gt_path,'img_' + format(i, '04') + '_ann.mat')
    #     print(gt_name)
    #     x = loadmat(gt_name)
    #     print (len(x['annPoints']))
    #
    # =============================================================================
    output_dir = './output/'

    model_name = os.path.basename(model_path).split('.')[0]
    file_results = os.path.join(output_dir, 'results_' + model_name + '_.txt')
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)

    output_dir = os.path.join(output_dir, 'density_maps_' + model_name)
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)
    #load test data
    data_loader = ImageDataLoader(data_path,
                                  shuffle=False,
                                  gt_downsample=True,
                                  pre_load=True)

    net = CrowdCounter()

    trained_model = os.path.join(model_path)
    network.load_net(trained_model, net)
    net.cuda()
    net.eval()
    mae = 0.0
    mse = 0.0
    i = 1
    #df = pd.read_csv("../etcount.csv")
    #df = df.set_index('IMG_NAME')
    #df['GROUND_TRUTH'] = 0.0
    #df['MTL-v4-A10'] = 0.0

    for blob in data_loader:
        if gt_flag:
            gt_name = os.path.join(
                gt_path, 'GT_' + format(blob['fname'].split('.')[0]) + '.mat')
            x = loadmat(gt_name)
            #gt_count = len(x['image_info'][0][0][0][0][0])
            #df.at[blob['fname'].split('.')[0], 'GROUND_TRUTH'] = gt_count
            i += 1
        im_data = blob['data']
        density_map = net(im_data)
        density_map = density_map.data.cpu().numpy()
        x, y = density_map[0][0].shape
        half = x // 2
        density_map1 = density_map[0][0][:half]
        density_map2 = density_map[0][0][half:]

        print(x, y)
        et_c1 = np.sum(density_map1)
        et_c2 = np.sum(density_map2)
        side = 'none'
        if et_c1 > et_c2:
            side = 'right'
        else:
            side = 'left'
        print(et_c1, et_c2)
        et_count = np.sum(density_map)

        print(blob['fname'].split('.')[0], ' Model Estimated count : ',
              et_count)
        #df.at[blob['fname'].split('.')[0], 'MTL-v4-A'] = et_count
        if vis:
            utils.display_results(im_data, density_map)
        if save_output:
            utils.save_density_map(
                density_map, output_dir,
                'output_' + blob['fname'].split('.')[0] + '.png')

    return (et_count, side)

    #df.to_csv('../etcount.csv')


#testimage('A', 1)