Ejemplo n.º 1
0
def pre_MCNN():
    """Load the pretrained MCNN crowd counter and return it ready for inference.

    Returns:
        A CrowdCounter with weights loaded from the saved .h5 file, moved
        to the GPU and switched to evaluation mode.
    """
    model_file = './saved_models/mcnn_shtechA_58.h5'  # path to the MCNN weights
    counter = CrowdCounter()
    network.load_net(model_file, counter)
    counter.cuda()
    counter.eval()
    return counter
Ejemplo n.º 2
0
def evaluate_model(trained_model, data_loader):
    """Evaluate a saved CrowdCounter model on a dataset.

    Args:
        trained_model: path to the saved .h5 weight file.
        data_loader: iterable of blobs with 'data' (image tensor) and
            'gt_density' (ground-truth density map); must also expose
            get_num_samples().

    Returns:
        (mae, mse): mean absolute error and — despite the name — the ROOT
        of the mean squared error of estimated vs. ground-truth counts.
    """
    net = CrowdCounter()
    network.load_net(trained_model, net)
    net.cuda()
    net.eval()
    mae = 0.0
    mse = 0.0
    for blob in data_loader:
        im_data = blob['data']
        gt_data = blob['gt_density']
        # Call the module itself instead of .forward() so any registered
        # hooks run (idiomatic PyTorch; output is identical otherwise).
        density_map = net(im_data, gt_data)
        density_map = density_map.data.cpu().numpy()
        # A count is the integral (sum) of a density map.
        gt_count = np.sum(gt_data)
        et_count = np.sum(density_map)
        mae += abs(gt_count - et_count)
        mse += ((gt_count - et_count) * (gt_count - et_count))
    mae = mae / data_loader.get_num_samples()
    mse = np.sqrt(mse / data_loader.get_num_samples())
    return mae, mse
Ejemplo n.º 3
0
# Prepare the output directory for this analysis run.
if not os.path.isdir(outpath):
    os.mkdir(outpath)

model_path = r'result_1001/mcnn_shtechA_348.h5' # result_1001 best model
model_name = os.path.basename(model_path).split('.')[0]

img_name = r''
gt_name = r''

if os.path.isdir(img_name):
    # FIX(review): the original line here was the incomplete statement
    # "image = " — a syntax error that made the whole script unrunnable.
    # The intended image-loading logic is unknown (img_name is empty
    # above, so this branch cannot trigger); left as a no-op.
    # TODO: restore the intended assignment.
    pass

analysis_report = os.path.join(outpath, 'Analysis_' + img_name + '.txt')
print('output analysis report path is {}'.format(analysis_report))

net = CrowdCounter()
trained_model = os.path.join(model_path)

# Read the model's parameters straight from the HDF5 file.
h5f = h5py.File(trained_model, mode='r')

# Copy each stored tensor into the matching entry of the network's
# state dict; keys in the .h5 file must line up with state_dict keys.
for k, v in net.state_dict().items():
    param = torch.from_numpy(np.asarray(h5f[k]))
    v.copy_(param)

net.cuda()
net.eval()

mae, mse = 0, 0

print(net)
Ejemplo n.º 4
0
#Tensorboard  config
use_tensorboard = False
save_exp_name = method + '_' + dataset_name + '_' + 'v1'
remove_all_log = False  # remove all historical experiments in TensorBoard
exp_name = None  # the previous experiment name in TensorBoard

# ------------
# Fixed seed for reproducibility across numpy and torch (CPU + GPU).
rand_seed = 64678
if rand_seed is not None:
    np.random.seed(rand_seed)
    torch.manual_seed(rand_seed)
    torch.cuda.manual_seed(rand_seed)

# load net
net = CrowdCounter()
network.weights_normal_init(net, dev=0.01)
net.cuda()
net.train()

# Only parameters that require gradients are handed to Adam.
params = list(net.parameters())
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                    net.parameters()),
                             lr=lr)

if not os.path.exists(output_dir):
    os.mkdir(output_dir)

# tensorboad
use_tensorboard = use_tensorboard and CrayonClient is not None
if use_tensorboard:
# NOTE(review): snippet truncated here — the body of this `if` is
# missing, so this fragment is not runnable as-is.
#loadt training and validation data
data_loader = ImageDataLoader(train_path,
                              train_gt_path,
                              shuffle=True,
                              gt_downsample=False,
                              pre_load=True)
# Class weights derived from the training data, fed to the counter's
# cross-entropy branch below.
class_wts = data_loader.get_classifier_weights()
data_loader_val = ImageDataLoader(val_path,
                                  val_gt_path,
                                  shuffle=False,
                                  gt_downsample=False,
                                  pre_load=True)

#load net and initialize it
net = CrowdCounter(ce_weights=class_wts)
network.weights_normal_init(net, dev=0.01)
net.cuda()
net.train()

# Only parameters that require gradients are handed to Adam.
params = list(net.parameters())
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                    net.parameters()),
                             lr=lr)

if not os.path.exists(output_dir):
    os.mkdir(output_dir)

# tensorboad
use_tensorboard = use_tensorboard and CrayonClient is not None
if use_tensorboard:
# NOTE(review): snippet truncated here — the body of this `if` is
# missing, so this fragment is not runnable as-is.
Ejemplo n.º 6
0
# tensorboard  config
use_tensorboard = False
save_exp_name = method + '_' + dataset_name + '_' + 'v1'
remove_all_log = False  # remove all historical experiments in tensorboard
exp_name = None  # the previous experiment name in tensorboard

# ------------
# Fixed seed for reproducibility across numpy and torch (CPU + GPU).
rand_seed = 64678
if rand_seed is not None:
    np.random.seed(rand_seed)
    torch.manual_seed(rand_seed)
    torch.cuda.manual_seed(rand_seed)

# load net
net = CrowdCounter()
# NOTE(review): other snippets on this page call weights_normal_init with
# dev=0.01; this one uses std=0.01 — presumably a different version of
# network.weights_normal_init. Verify against the actual helper signature.
network.weights_normal_init(net, std=0.01)
net.cuda()
net.train()  # sets the module in training mode

params = list(net.parameters())
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                    net.parameters()),
                             lr=lr)

if not os.path.exists(output_dir):
    os.mkdir(output_dir)

# tensorboad
# NOTE(review): lowercase `crayonclient` here vs `CrayonClient` elsewhere —
# confirm which name the import actually binds.
use_tensorboard = use_tensorboard and crayonclient is not None
if use_tensorboard:
# NOTE(review): snippet truncated here — the body of this `if` is missing.
Ejemplo n.º 7
0
    # NOTE(review): fragment starts mid-block — the enclosing context
    # (where dataset_name/model/pool/lr come from) is not visible here.
    # Learning rate is chosen per dataset.
    if dataset_name == 'shtechA':
        lr = 1e-5
    if dataset_name == 'shtechB':
        lr = 5*1e-6
    scaling=8  # output density map is 1/8 size of input image     
# NOTE(review): Python 2 print statement — this snippet predates Python 3.
print 'learning rate %f' % (lr)

### random seed
rand_seed = 64678  
if rand_seed is not None: 
    np.random.seed(rand_seed)
    torch.manual_seed(rand_seed)
    torch.cuda.manual_seed_all(rand_seed)

### initialize network
net = CrowdCounter(model=model,pool=pool)
network.weights_normal_init(net, dev=0.01)
net.cuda()
net.train()

### optimizer
params = list(net.parameters())
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, net.parameters()), lr=lr)

### load data
pre_load=True   
data_loader = ImageDataLoader(train_path, train_gt_path, shuffle=True, gt_downsample=True, pre_load=pre_load, 
                              batch_size=batch_size,scaling=scaling)
data_loader_val = ImageDataLoader(val_path, val_gt_path, shuffle=False, gt_downsample=True, pre_load=pre_load, 
                              batch_size=1,scaling=scaling)
Ejemplo n.º 8
0
save_output = True

# Test images, ground-truth CSVs, and the trained model to evaluate.
data_path = './data/original/shanghaitech/part_A_final/test_data/images/'
gt_path = './data/original/shanghaitech/part_A_final/test_data/ground_truth_csv/'
model_path = './saved_models/mcnn_shtechA_430.h5'

output_dir = './output/'
model_name = os.path.basename(model_path).split('.')[0]
file_results = os.path.join(output_dir, 'results_' + model_name + '_.txt')
if not os.path.exists(output_dir):
    os.mkdir(output_dir)
# Density-map images go into a per-model subdirectory.
output_dir = os.path.join(output_dir, 'density_maps_' + model_name)
if not os.path.exists(output_dir):
    os.mkdir(output_dir)

net = CrowdCounter()

trained_model = os.path.join(model_path)
network.load_net(trained_model, net)
net.cuda()
net.eval()
# Error accumulators, presumably summed over a test loop that follows
# this snippet (truncated from view).
mae = 0.0
mse = 0.0

#load test data
data_loader = ImageDataLoader(data_path,
                              gt_path,
                              shuffle=False,
                              gt_downsample=True,
                              pre_load=True)
Ejemplo n.º 9
0
    # NOTE(review): fragment starts mid-block — the enclosing context
    # (dataset_name/model/pool) is not visible here.
    if dataset_name == 'shtechA':
        lr = 1e-5
    if dataset_name == 'shtechB':
        lr = 5 * 1e-6
    scaling = 8  # output density map is 1/8 size of input image
# NOTE(review): Python 2 print statement — this snippet predates Python 3.
print 'learning rate %f' % (lr)

### random seed
rand_seed = 64678
if rand_seed is not None:
    np.random.seed(rand_seed)
    torch.manual_seed(rand_seed)
    torch.cuda.manual_seed_all(rand_seed)

### initialize network
net = CrowdCounter(model=model, pool=pool)
network.weights_normal_init(net, dev=0.01)
net.cuda()
net.train()

### optimizer
params = list(net.parameters())
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                    net.parameters()),
                             lr=lr)

### load data
pre_load = True
data_loader = ImageDataLoader(train_path,
                              train_gt_path,
                              shuffle=True,
# NOTE(review): snippet truncated mid-call — remaining arguments missing.
Ejemplo n.º 10
0
#loadt training and validation data
data_loader = ImageDataLoader(train_path,
                              train_gt_path,
                              shuffle=True,
                              gt_downsample=False,
                              pre_load=True)
# Class weights derived from the training data, fed to the counter's
# cross-entropy branch below.
class_wts = data_loader.get_classifier_weights()
data_loader_val = ImageDataLoader(val_path,
                                  val_gt_path,
                                  shuffle=False,
                                  gt_downsample=False,
                                  pre_load=True)

#load net and initialize it
net = CrowdCounter(ce_weights=class_wts)
network.weights_normal_init(net, dev=0.01)
net.cuda()
net.train()

# Only parameters that require gradients are handed to Adam.
params = list(net.parameters())
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                    net.parameters()),
                             lr=lr)

if not os.path.exists(output_dir):
    os.mkdir(output_dir)

# tensorboad
use_tensorboard = use_tensorboard and CrayonClient is not None
if use_tensorboard:
# NOTE(review): snippet truncated here — the body of this `if` is missing.
Ejemplo n.º 11
0
save_output = True

# Oil-palm test imagery and the trained model to evaluate.
data_path = './data-oilpalm/oilpalm-test/IMG_Palm/'
gt_path = './data-oilpalm/oilpalm-test/ground_truth_csv/'
model_path = './oilpalm_saved_models/mcnn_oilpalm_70.h5'

output_dir = './output/'
model_name = os.path.basename(model_path).split('.')[0]
file_results = os.path.join(output_dir, 'results_' + model_name + '_.txt')
if not os.path.exists(output_dir):
    os.mkdir(output_dir)
# Density-map images go into a per-model subdirectory.
output_dir = os.path.join(output_dir, 'density_maps_' + model_name)
if not os.path.exists(output_dir):
    os.mkdir(output_dir)

net = CrowdCounter()

trained_model = os.path.join(model_path)
network.load_net(trained_model, net)
net.cuda()
net.eval()
# Error accumulators, presumably summed over a test loop that follows
# this snippet (truncated from view).
mae = 0.0
rmse = 0.0
mrmse = 0.0

# load test data
data_loader = ImageDataLoader(data_path,
                              gt_path,
                              shuffle=False,
                              gt_downsample=True,
                              pre_load=True)
Ejemplo n.º 12
0
save_output = True

# data_path = './data/original/shanghaitech/part_B_final/test_data/images/'
# gt_path = './data/original/shanghaitech/part_B_final/test_data/ground_truth_csv/'
model_path = './final_models/mcnn_shtechA_660.h5'

output_dir = './output/'
model_name = os.path.basename(model_path).split('.')[0]
file_results = os.path.join(output_dir, 'results_' + model_name + '_.txt')
if not os.path.exists(output_dir):
    os.mkdir(output_dir)
output_dir = os.path.join(output_dir, 'density_maps_' + model_name)
if not os.path.exists(output_dir):
    os.mkdir(output_dir)

net = CrowdCounter()

trained_model = os.path.join(model_path)
network.load_net(trained_model, net)
net.cuda()
net.eval()
# NOTE(review): redundant — net.eval() above already sets training=False.
net.training = False

# Read a single screenshot as grayscale (flag 0) and shape it as a
# 1x1xHxW batch for the network.
im_data = cv2.imread(
    '/home/haoxue/Pictures/Screenshot from 2018-11-05 15-26-25.png', 0)
im_data = im_data.reshape((1, 1, im_data.shape[0], im_data.shape[1]))
# NOTE(review): im_data is a raw numpy uint8 array here — presumably
# CrowdCounter converts it to a tensor internally; confirm.
density_map = net(im_data)
density_map = density_map.data.cpu().numpy()
# Estimated count is the integral (sum) of the density map.
et_count = np.sum(density_map)
if vis:
    utils.display_density(im_data, density_map)
Ejemplo n.º 13
0
save_output = False

image_path = 'othertest/test.1.jpg'
model_path = 'saved_models/mcnn_SH_B_95.h5'

# output_dir = './output/'
model_name = os.path.basename(model_path).split('.')[0]

# file_results = os.path.join(output_dir,'results_' + model_name + '_.txt')
# if not os.path.exists(output_dir):
#     os.mkdir(output_dir)
# output_dir = os.path.join(output_dir, 'density_maps_' + model_name)
# if not os.path.exists(output_dir):
#     os.mkdir(output_dir)

net = CrowdCounter()

trained_model = os.path.join(model_path)
network.load_net(trained_model, net)
# CPU-only variant: the cuda()/eval() calls are commented out.
# net.cuda()
# net.eval()
mae = 0.0
mse = 0.0

#load test data
# data_loader = ImageDataLoader(data_path, gt_path, shuffle=False, gt_downsample=True, pre_load=True)
# Read the test image as grayscale (flag 0) and convert to float32.
img = cv2.imread(image_path, 0)

img = img.astype(np.float32, copy=False)
ht = img.shape[0]
wd = img.shape[1]
# NOTE(review): snippet truncated here — the inference code that uses
# ht/wd is not visible.
Ejemplo n.º 14
0
# tensorboard  config
use_tensorboard = False
save_exp_name = method + '_' + dataset_name + '_' + 'v1'
remove_all_log = False  # remove all historical experiments in tensorboard
exp_name = None  # the previous experiment name in tensorboard

# ------------
# Fixed seed for reproducibility across numpy and torch (CPU + GPU).
rand_seed = 64678
if rand_seed is not None:
    np.random.seed(rand_seed)
    torch.manual_seed(rand_seed)
    torch.cuda.manual_seed(rand_seed)

# load net
net = CrowdCounter()
# NOTE(review): other snippets pass dev=0.01; this one uses std=0.01 —
# verify against the actual weights_normal_init signature.
network.weights_normal_init(net, std=0.01)
net.cuda()
net.train()  # sets the module in training mode

params = list(net.parameters())
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, net.parameters()), lr=lr)

if not os.path.exists(output_dir):
    os.mkdir(output_dir)

# tensorboad
# NOTE(review): lowercase `crayonclient` here vs `CrayonClient` elsewhere —
# confirm which name the import actually binds.
use_tensorboard = use_tensorboard and crayonclient is not None
if use_tensorboard:
    cc = crayonclient(hostname='127.0.0.1')
    if remove_all_log:
# NOTE(review): snippet truncated here — the body of this `if` is missing.
Ejemplo n.º 15
0
# cuDNN enabled with benchmark mode: fastest kernels are auto-tuned for
# the (fixed) input size at the cost of non-deterministic selection.
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
vis = False
save_output = True

# NOTE(review): model_path / model / pool / data_path / gt_path are not
# defined in this fragment — they come from context outside the snippet.
output_dir = './output/'
model_name = os.path.basename(model_path).split('.')[0]
file_results = os.path.join(output_dir, 'results_' + model_name + '_.txt')
if not os.path.exists(output_dir):
    os.mkdir(output_dir)
output_dir = os.path.join(output_dir, 'density_maps_' + model_name)
if not os.path.exists(output_dir):
    os.mkdir(output_dir)

net = CrowdCounter(model, pool)
trained_model = os.path.join(model_path)
network.load_net(trained_model, net)
net.cuda()
net.eval()

# Output density map downscaling depends on the architecture variant.
if model in ['base', 'wide']:
    scaling = 4
if model == 'deep':
    scaling = 8

#load test data
data_loader = ImageDataLoader(data_path,
                              gt_path,
                              shuffle=False,
                              gt_downsample=True,
# NOTE(review): snippet truncated mid-call — remaining arguments missing.
Ejemplo n.º 16
0
    # NOTE(review): fragment starts mid-function — `f`, `mae`, `mse` are
    # bound in code above this snippet.
    f.write('MAE: %0.2f, MSE: %0.2f' % (mae,mse))
    f.close()


if __name__ == '__main__':

    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = False

    # data_path = './data/original/shanghaitech/part_B_final/test_data/images/'
    # gt_path = './data/original/shanghaitech/part_B_final/test_data/ground_truth_csv/'

    data_path = './data/original/shanghaitech/part_A_final/test_data/images/'
    gt_path = './data/original/shanghaitech/part_A_final/test_data/ground_truth_csv/'

    model_path = './Shanghai_A_Retrain_eps_1_5/saved_models//MCNN_Shanghai_A.h5'

    net = CrowdCounter()  # NOTE(review): the original binds this to `model`
    # (see next line); `device` is also defined outside this snippet.
    # -- original code below is unchanged --
    model = CrowdCounter()
    trained_model = os.path.join(model_path)
    network.load_net(trained_model, model)
    model.to(device)
    model.eval()

    data_loader = ImageDataLoader(data_path, gt_path, shuffle=False, gt_downsample=True, pre_load=True)

    # Epsilons for the robustness evaluation (only 0.25 is used below).
    eps_list = [0, 0.25, 0.5, 0.75, 1]

    print(model_path)

    epoch_robust_bound(data_loader, model, device, epsilon_try=0.25)
Ejemplo n.º 17
0
def main():
    """Train an MCNN crowd counter on ShanghaiTech part B.

    Resumes from a checkpoint when CONTINUE_TRAIN is set, trains for the
    configured epoch range, checkpoints every epoch, logs train/val
    MAE+MSE to CSV files, and saves the model whenever the validation
    MAE (tie-broken by MSE) improves.
    """
    # define output folder
    output_dir = './saved_models/'
    log_dir = './mae_mse/'
    checkpoint_dir = './checkpoint/'

    train_path = '/home/jake/Desktop/Projects/Python/dataset/SH_B/cooked/train/images'
    train_gt_path = '/home/jake/Desktop/Projects/Python/dataset/SH_B/cooked/train/ground_truth'
    val_path = '/home/jake/Desktop/Projects/Python/dataset/SH_B/cooked/val/images'
    val_gt_path = '/home/jake/Desktop/Projects/Python/dataset/SH_B/cooked/val/ground_truth'

    # last checkpoint
    checkpointfile = os.path.join(checkpoint_dir, 'checkpoint.94.pth.tar')

    # some description
    method = 'mcnn'
    dataset_name = 'SH_B'

    # log file (append mode so restarts keep prior history)
    f_train_loss = open(os.path.join(log_dir, "train_loss.csv"), "a+")
    f_val_loss = open(os.path.join(log_dir, "val_loss.csv"), "a+")

    # Training configuration
    start_epoch = 0
    end_epoch = 97
    lr = 0.00001
    # momentum = 0.9
    disp_interval = 1000
    # log_interval = 250

    # Flag
    CONTINUE_TRAIN = True
    # Tensorboard  config

    # use_tensorboard = False
    # save_exp_name = method + '_' + dataset_name + '_' + 'v1'
    # remove_all_log = False   # remove all historical experiments in TensorBoard
    # exp_name = None # the previous experiment name in TensorBoard

    # -----------------------------------------------------------------------------------------
    # Fixed seed for reproducibility across numpy and torch (CPU + GPU).
    rand_seed = 64678
    if rand_seed is not None:
        np.random.seed(rand_seed)
        torch.manual_seed(rand_seed)
        torch.cuda.manual_seed(rand_seed)

    # Define network
    net = CrowdCounter()
    network.weights_normal_init(net, dev=0.01)
    # net.cuda()
    net.train()
    # params = list(net.parameters())
    optimizer = torch.optim.Adam(
        filter(lambda p: p.requires_grad, net.parameters()), lr=lr)

    if not os.path.exists(output_dir):
        os.mkdir(output_dir)

    # # tensorboad
    # use_tensorboard = use_tensorboard and CrayonClient is not None
    # if use_tensorboard:
    #     cc = CrayonClient(hostname='127.0.0.1')
    #     if remove_all_log:
    #         cc.remove_all_experiments()
    #     if exp_name is None:
    #         exp_name = save_exp_name
    #         exp = cc.create_experiment(exp_name)
    #     else:
    #         exp = cc.open_experiment(exp_name)

    # training param

    # Resume network, optimizer state, and epoch counter from the last
    # checkpoint when continuing a previous run.
    if CONTINUE_TRAIN:
        net, optimizer, start_epoch = utils.load_checkpoint(
            net, optimizer, filename=checkpointfile)

    train_loss = 0
    step_cnt = 0
    re_cnt = False
    t = Timer()
    t.tic()

    # Load data
    data_loader = ImageDataLoader(
        train_path, train_gt_path, shuffle=True, gt_downsample=True, pre_load=True)
    data_loader_val = ImageDataLoader(
        val_path, val_gt_path, shuffle=False, gt_downsample=True, pre_load=True)
    # Start "best so far" at the largest int so the first epoch always wins.
    best_mae = sys.maxsize

    # Start training

    # NOTE(review): range(start_epoch, end_epoch-1) stops one epoch short
    # of end_epoch-1 — confirm whether end_epoch was meant to be inclusive.
    for this_epoch in range(start_epoch, end_epoch-1):
        step = -1
        train_loss = 0
        for blob in data_loader:
            step += 1
            img_data = blob['data']
            gt_data = blob['gt_density']
            # Forward pass; the module stores its loss on net.loss.
            et_data = net(img_data, gt_data)
            loss = net.loss
            train_loss += loss.data
            step_cnt += 1
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Periodic progress display with estimated vs. ground-truth counts.
            if step % disp_interval == 0:
                duration = t.toc(average=False)
                fps = step_cnt / duration
                gt_count = np.sum(gt_data)
                et_data = et_data.data.cpu().numpy()
                et_count = np.sum(et_data)
                utils.save_results(img_data, gt_data, et_data, output_dir,
                                   fname="{}.{}.png".format(this_epoch, step))
                log_text = 'epoch: %4d, step %4d, Time: %.4fs, gt_cnt: %4.1f, et_cnt: %4.1f' % (this_epoch,
                                                                                                step, 1./fps, gt_count, et_count)
                log_print(log_text, color='green', attrs=['bold'])
                re_cnt = True

            # Restart the timer after each display so fps stays current.
            if re_cnt:
                t.tic()
                re_cnt = False

        # Save checkpoint
        state = {'epoch': this_epoch, 'state_dict': net.state_dict(),
                 'optimizer': optimizer.state_dict()}
        cp_filename = "checkpoint.{}.pth.tar".format(this_epoch)
        torch.save(state, os.path.join(checkpoint_dir, cp_filename))
# ========================== END 1 EPOCH==================================================================================
        train_mae, train_mse = evaluate_network(net, data_loader)
        f_train_loss.write("{},{}\n".format(train_mae, train_mse))
        log_text = 'TRAINING - EPOCH: %d, MAE: %.1f, MSE: %0.1f' % (
            this_epoch, train_mae, train_mse)
        log_print(log_text, color='green', attrs=['bold'])
# =====================================================VALIDATION=========================================================
        # calculate error on the validation dataset
        val_mae, val_mse = evaluate_network(net, data_loader_val)
        f_val_loss.write("{},{}\n".format(val_mae, val_mse))
        log_text = 'VALIDATION - EPOCH: %d, MAE: %.1f, MSE: %0.1f' % (
            this_epoch, val_mae, val_mse)
        log_print(log_text, color='green', attrs=['bold'])
        # SAVE model
        # Save when MAE strictly improves, or when MAE ties and MSE improves.
        is_save = False
        if val_mae <= best_mae:
            if val_mae < best_mae:
                is_save = True
                best_mae = val_mae
                best_mse = val_mse
            else:
                if val_mse < best_mse:
                    is_save = True
                    best_mse = val_mse

        if is_save:
            save_name = os.path.join(output_dir, '{}_{}_{}.h5'.format(
                method, dataset_name, this_epoch))
            network.save_net(save_name, net)
            best_model = '{}_{}_{}.h5'.format(method, dataset_name, this_epoch)
            log_text = 'BEST MAE: %0.1f, BEST MSE: %0.1f, BEST MODEL: %s' % (
                best_mae, best_mse, best_model)
            log_print(log_text, color='green', attrs=['bold'])

        # if use_tensorboard:
        #     exp.add_scalar_value('MAE', mae, step=epoch)
        #     exp.add_scalar_value('MSE', mse, step=epoch)
        #     exp.add_scalar_value('train_loss', train_loss /
        #                          data_loader.get_num_samples(), step=epoch)

    f_train_loss.close()
    f_val_loss.close()
Ejemplo n.º 18
0
def testimage(modelname, camname):
    """Run the CMTL crowd counter over a camera's test images.

    Args:
        modelname: 'A' selects the shtechA model, anything else shtechB.
        camname: 0 selects ../data/test/images/, anything else images2/.

    Returns:
        (et_count, side): the estimated count of the LAST image processed
        and which half of it held more people ('right'/'left'); defaults
        (0.0, 'none') if the loader yields no images.
    """
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = False
    vis = False
    save_output = False

    #test data and model file path
    if camname == 0:
        data_path = '../data/test/images/'
    else:
        data_path = '../data/test/images2/'

    if modelname == 'A':
        model_path = './final_models/cmtl_shtechA_204.h5'
    else:
        model_path = './final_models/cmtl_shtechB_768.h5'
    print("Model name:", modelname, " Camname: ", camname)
    gt_flag = False
    if gt_flag:
        gt_path = '../dataset/ShanghaiTech/part_A/test_data/ground_truth/'

    # =============================================================================
    # for i in range(1, 4):
    #     gt_name = os.path.join(gt_path,'img_' + format(i, '04') + '_ann.mat')
    #     print(gt_name)
    #     x = loadmat(gt_name)
    #     print (len(x['annPoints']))
    #
    # =============================================================================
    output_dir = './output/'

    model_name = os.path.basename(model_path).split('.')[0]
    file_results = os.path.join(output_dir, 'results_' + model_name + '_.txt')
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)

    output_dir = os.path.join(output_dir, 'density_maps_' + model_name)
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)
    #load test data
    data_loader = ImageDataLoader(data_path,
                                  shuffle=False,
                                  gt_downsample=True,
                                  pre_load=True)

    net = CrowdCounter()

    trained_model = os.path.join(model_path)
    network.load_net(trained_model, net)
    net.cuda()
    net.eval()
    mae = 0.0
    mse = 0.0
    i = 1
    #df = pd.read_csv("../etcount.csv")
    #df = df.set_index('IMG_NAME')
    #df['GROUND_TRUTH'] = 0.0
    #df['MTL-v4-A10'] = 0.0

    # FIX(review): these were previously first assigned inside the loop,
    # so an empty data_loader caused a NameError at the return statement.
    et_count = 0.0
    side = 'none'

    for blob in data_loader:
        if gt_flag:
            gt_name = os.path.join(
                gt_path, 'GT_' + format(blob['fname'].split('.')[0]) + '.mat')
            x = loadmat(gt_name)
            #gt_count = len(x['image_info'][0][0][0][0][0])
            #df.at[blob['fname'].split('.')[0], 'GROUND_TRUTH'] = gt_count
            i += 1
        im_data = blob['data']
        density_map = net(im_data)
        density_map = density_map.data.cpu().numpy()
        # Split the density map into top/bottom halves along the first
        # image axis and compare their counts.
        x = len(density_map[0][0])
        y = len(density_map[0][0][0])
        half = (int)(x / 2)
        density_map1 = density_map[0][0][0:half][:]
        density_map2 = density_map[0][0][half:][:]

        print(x, y)
        et_c1 = np.sum(density_map1)
        et_c2 = np.sum(density_map2)
        side = 'none'
        if et_c1 > et_c2:
            side = 'right'
        else:
            side = 'left'
        print(et_c1, et_c2)
        # Estimated total count is the integral (sum) of the density map.
        et_count = np.sum(density_map)

        print(blob['fname'].split('.')[0], ' Model Estimated count : ',
              et_count)
        #df.at[blob['fname'].split('.')[0], 'MTL-v4-A'] = et_count
        if vis:
            utils.display_results(im_data, density_map)
        if save_output:
            utils.save_density_map(
                density_map, output_dir,
                'output_' + blob['fname'].split('.')[0] + '.png')

    return (et_count, side)

    #df.to_csv('../etcount.csv')


#testimage('A', 1)

#training configuration
disp_interval = 500


# ------------
# Fixed seed for reproducibility across numpy and torch (CPU + GPU).
rand_seed = 64678  
if rand_seed is not None:
    np.random.seed(rand_seed)
    torch.manual_seed(rand_seed)
    torch.cuda.manual_seed(rand_seed)


# load net
net = CrowdCounter()
network.weights_normal_init(net, dev=0.01)




# NOTE(review): `args` (MODE, SAVE_ROOT, Dataset) is not defined in this
# fragment — it comes from an argparse setup outside the snippet.
# Pick which weights to load based on the run mode; 'train' (or any
# other mode) falls through to the generic pretrained file.
trained_model= 'pretrained.h5'

if args.MODE == 'val':
    trained_model= args.SAVE_ROOT+'/'+args.Dataset+'_trained_model.h5'
    print('loading supervised trained model weights')
if args.MODE == 'test':
    trained_model= args.SAVE_ROOT+'/'+args.Dataset+'_self_trained_model_val.h5'
    print('loading self learned val model weights')

    
Ejemplo n.º 20
0
#Tensorboard  config
use_tensorboard = False
save_exp_name = method + '_' + dataset_name + '_' + 'v1'
remove_all_log = False   # remove all historical experiments in TensorBoard
exp_name = None # the previous experiment name in TensorBoard

# ------------
# Fixed seed for reproducibility across numpy and torch (CPU + GPU).
rand_seed = 64678  
if rand_seed is not None:
    np.random.seed(rand_seed)
    torch.manual_seed(rand_seed)
    torch.cuda.manual_seed(rand_seed)


# load net
net = CrowdCounter()
network.weights_normal_init(net, dev=0.01)
net.cuda()
net.train()

# Only parameters that require gradients are handed to Adam.
params = list(net.parameters())
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, net.parameters()), lr=lr)

if not os.path.exists(output_dir):
    os.mkdir(output_dir)

# tensorboad
use_tensorboard = use_tensorboard and CrayonClient is not None
if use_tensorboard:
    cc = CrayonClient(hostname='127.0.0.1')
    if remove_all_log:
# NOTE(review): snippet truncated here — the body of this `if` is missing.
Ejemplo n.º 21
0
    # NOTE(review): fragment starts mid-function — train paths, `device`,
    # train_DA and test_DA are defined outside this snippet.
    train_gt_path = './data/shanghaiA_100patches/train_den/'

    # end_step = 1000
    start_epoch = 0
    lr = 0.00001
    momentum = 0.9
    # disp_interval = 5000
    # log_interval = 250
    pretrained_vgg16 = False
    fine_tune = False
    # Fixed seed for reproducibility across numpy and torch (CPU + GPU).
    rand_seed = 64678
    if rand_seed is not None:
        np.random.seed(rand_seed)
        torch.manual_seed(rand_seed)
        torch.cuda.manual_seed(rand_seed)
    net = CrowdCounter()
    network.weights_normal_init(net, dev=0.01)
    # Optionally seed the domain-adaptation backbone with VGG16 weights.
    if pretrained_vgg16:
        vgg16_model = models.vgg16(pretrained=True)
        # vgg16_model.cuda()
        net.DA_Net.copy_params_from_vgg16(vgg16_model)
    net.to(device)

    # Train two epochs, keeping the per-epoch loss; run the test pass
    # after the second epoch only.
    Loss_list = []
    for epoch in range(start_epoch, start_epoch + 2):
        train_loss = train_DA(epoch)
        Loss_list.append(train_loss)
        if epoch == 1:
            test_DA(epoch)

    # record train_loss
Ejemplo n.º 22
0
def open_model(model_path):
    """Build a CrowdCounter, load weights from *model_path*, and return it in eval mode."""
    net = CrowdCounter()
    network.load_net(model_path, net)
    net.eval()
    return net