def train(net, train_loader, optimizer, num_epochs):
    log_file = open(args.SAVE_ROOT+"/"+args.Dataset+"_training.log","w",1)
    log_print("Training ....", color='green', attrs=['bold'])
    # training
    train_loss = 0
    step_cnt = 0
    re_cnt = False
    t = Timer()
    t.tic()
    for epoch in range(1,num_epochs+1):
        step = -1
        train_loss = 0
        for blob in train_loader:                
            step = step + 1        
            im_data = blob['data']
            gt_data = blob['gt_density']
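            # forward pass: the network returns the estimated density map and stores the loss internally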
            density_map = net(im_data, gt_data)
            loss = net.loss
            train_loss += loss.data
            step_cnt += 1
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            
            if step % disp_interval == 0:            
                duration = t.toc(average=False)
                fps = step_cnt / duration
                gt_count = np.sum(gt_data)    
                density_map = density_map.data.cpu().numpy()
                et_count = np.sum(density_map)
                utils.save_results(im_data, gt_data, density_map, args.SAVE_ROOT)
                log_text = 'epoch: %4d, step %4d, Time: %.4fs, gt_cnt: %4.1f, et_cnt: %4.1f' % (epoch,
                    step, 1./fps, gt_count,et_count)
                log_print(log_text, color='green', attrs=['bold'])
                re_cnt = True   
            if re_cnt:                                
                t.tic()
                re_cnt = False
    return net
Example #2
        density_map = net(im_data, gt_data, gt_class_label, class_wts)
        loss = net.loss
        train_loss += loss.data[0]
        step_cnt += 1
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if step % disp_interval == 0:
            duration = t.toc(average=False)
            fps = step_cnt / duration
            gt_count = np.sum(gt_data)
            density_map = density_map.data.cpu().numpy()
            et_count = np.sum(density_map)
            utils.save_results(im_data, gt_data, density_map, output_dir)
            log_text = 'epoch: %4d, step %4d, Time: %.4fs, gt_cnt: %4.1f, et_cnt: %4.1f' % (
                epoch, step, 1. / fps, gt_count, et_count)
            log_print(log_text, color='green', attrs=['bold'])
            re_cnt = True

        if re_cnt:
            t.tic()
            re_cnt = False

    if (epoch % 2 == 0):
        save_name = os.path.join(
            output_dir, '{}_{}_{}.h5'.format(method, dataset_name, epoch))
        network.save_net(save_name, net)
        #calculate error on the validation dataset
        mae, mse = evaluate_model(save_name, data_loader_val)
def test(net, test_path, optimizer, num_epochs, Dataset=args.Dataset):

    if Dataset=="fdst":
      num_sessions=3
      test_len=750
      low_limit=451
      high_limit=750
    else:
        num_sessions=8
        test_len=2000
        low_limit=1201
        high_limit=2000
    #print(num_sessions)


    sessions_list = []
    ses_size = 100
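    # session boundaries: one start index every ses_size frames from low_limit,
    # with test_len appended as the final end frame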
    
    for i in range(low_limit, high_limit, ses_size):
        sessions_list.append(i)
    sessions_list.append(test_len)
    #print("test list: ", sessions_list)
    for test_inc in range(len(sessions_list)-1):
        start_frame = sessions_list[test_inc]
        end_frame = sessions_list[test_inc+1]
        #print('start:,end:', (start_frame,end_frame))

        test_loader = ImageDataLoader_Val_Test(test_path, None,'test_split',start_frame, end_frame, shuffle=False, gt_downsample=True, pre_load=True, Dataset=args.Dataset)
        log_file = open(args.SAVE_ROOT+"/"+args.Dataset+"_test.log","w",1)
        log_print("test/Self Training ....", color='green', attrs=['bold'])
        # training
        train_loss = 0
        step_cnt = 0
        re_cnt = False
        t = Timer()
        t.tic()
        for epoch in range(1,num_epochs+1):
            step = -1
            train_loss = 0
            for blob in test_loader:                
                step = step + 1        
                im_data = blob['data']
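                # self-training: first run the network in inference mode to produce a
                # pseudo ground-truth density map, then switch back to training mode
                # and fit the network on that pseudo label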
                net.training = False
                gt_data = net(im_data)
                gt_data = gt_data.cpu().detach().numpy()
                net.training = True
                density_map = net(im_data, gt_data)
                loss = net.loss
                train_loss += loss.data
                step_cnt += 1
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
            
                if step % disp_interval == 0:            
                  duration = t.toc(average=False)
                  fps = step_cnt / duration
                  gt_count = np.sum(gt_data)    
                  density_map = density_map.data.cpu().numpy()
                  et_count = np.sum(density_map)
                  utils.save_results(im_data,gt_data,density_map, args.SAVE_ROOT)
                  log_text = 'epoch: %4d, step %4d, Time: %.4fs, gt_cnt: %4.1f, et_cnt: %4.1f' % (epoch,
                      step, 1./fps, gt_count,et_count)
                  log_print(log_text, color='green', attrs=['bold'])
                  re_cnt = True   
                if re_cnt:                                
                  t.tic()
                  re_cnt = False

        torch.backends.cudnn.enabled = True
        torch.backends.cudnn.benchmark = False 

        session = str(sessions_list[test_inc])
        network.save_net(args.SAVE_ROOT+'/'+args.Dataset+ session +'_self_trained_model_test.h5', net) 
        output_dir = './densitymaps/' + args.Dataset + session 
        net.cuda()
        net.eval()

        all_test_loader = ImageDataLoader(test_path, None, 'test_split', shuffle=False, gt_downsample=True, pre_load=True , Dataset=args.Dataset)

        for blob in all_test_loader:                        
            im_data = blob['data']
            net.training = False
            density_map = net(im_data)
            density_map = density_map.data.cpu().numpy()
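            # drop the batch and channel dimensions so the map can be saved as a 2-D CSV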
            new_dm = density_map.reshape([density_map.shape[2], density_map.shape[3]])
            
            np.savetxt(output_dir + '_output_' + blob['fname'].split('.')[0] +'.csv', new_dm, delimiter=',', fmt='%.6f')

    return net
Example #4
                              cf.batch_size),
            steps_per_epoch=(len(only_parasite_generator.files_list) //
                             cf.batch_size),
            epochs=cf.parasite_epochs,
            verbose=1,
            callbacks=[tensorboard])

        model.fit_generator(
            generator=overall_generator.generate(),
            validation_data=validation_generator.generate(),
            validation_steps=(len(validation_generator.files_list) //
                              cf.batch_size),
            steps_per_epoch=(len(overall_generator.files_list) //
                             cf.batch_size),
            epochs=cf.overall_epochs,
            verbose=1,
            callbacks=[tensorboard])

        model.save_weights(os.path.join(cf.train_output_path, 'weights.hdf5'))

    elif args.action == 'test':
        weights = os.path.join(cf.train_output_path, 'weights.hdf5')
        save_results(cf.test_data_path, weights, cf.train_output_path,
                     cf.save_predictions, cf.save_regions)

        if cf.print_overall_table:
            overall_table(cf.test_data_path, cf.test_labels_path, weights)

        if cf.print_jaccard_table:
            jaccard_table(cf.test_data_path, cf.test_labels_path, weights)
Example #5
    def word_translation(self):
        """
        Evaluation on word translation.
        """
        # mapped word embeddings
        all_emb = {
            l2: apply_mapping(self.mapping[l2],
                              self.embs[l2].weight).data.cpu()
            for l2 in self.params.langs
        }
        results = defaultdict(dict)
        # for computational efficiency, iterate over source languages and calculate all methods for each one
        for src_lang in self.params.langs:
            logger.info('\n\n\n\nSource Language: {}\n\n\n\n'.format(src_lang))
            torch.cuda.empty_cache()

            # get source queries
            paths = self.get_dico_paths(src_lang)
            query_ids = self.aggregate_query_ids(paths, src_lang)
            if query_ids is None:
                logger.info(
                    'Warning: No test dictionary was found for source language {}. Skipping!'
                    .format(src_lang))
                continue

            method = 'csls_knn_10'
            # init translation
            init_trans, top_scores = BI_translation(src_lang,
                                                    query_ids,
                                                    method,
                                                    all_emb,
                                                    cuda=self.params.cuda)

            for inf_met in self.params.multilingual_inference_method:
                logger.info('\n\nMultilingual inference method: {}\n\n'.format(
                    inf_met))

                # improve source word representation, and re-translate
                if inf_met != 'BI':
                    updated_trans, used_langs = update_translation_for_all_langs(
                        self.params.langs,
                        src_lang,
                        query_ids,
                        all_emb,
                        init_trans,
                        method,
                        inf_met,
                        top_scores,
                        cuda=self.params.cuda)
                else:
                    used_langs = None
                    updated_trans = init_trans

                # re-arrange translations for convenience
                translation_by_src_id, used_langs_by_src_id = self.translation_by_src_id(
                    updated_trans, used_langs, src_lang)

                # calculate accuracy, and matching per source word
                for tgt_lang, path in paths.items():
                    pair_result = self.get_pair_accuracy(
                        path, src_lang, self.lang_dico[src_lang].word2id,
                        tgt_lang, self.lang_dico[tgt_lang].word2id,
                        translation_by_src_id, method)
                    if inf_met != 'BI':
                        self.print_aux_statistics(src_lang, tgt_lang, path,
                                                  used_langs_by_src_id)
                    results[(src_lang, tgt_lang)][inf_met] = pair_result

        save_results(self.params, results,
                     self.params.multilingual_inference_method)
Example #6
model_path = './saved_models/mcnn_shtechA_1998.h5'

output_dir = './output/'

net = CrowdCounter()
"""Load trained model"""
trained_model = os.path.join(model_path)
network.load_net(trained_model, net)
net.cuda()
net.eval()
gt_data = None
"""Load image from data_path"""
im_data = read_image(data_path)
"""Calculate density map"""
density_map = net(im_data, gt_data)
"""convert to numpy array"""
density_map = density_map.data.cpu().numpy()
"""estimation count from density map"""
et_count = np.sum(density_map)
"""Save result: density map; stack input image and density map"""
utils.save_results(im_data, int(et_count), density_map, output_dir)
utils.save_density_map(density_map, output_dir)

import matplotlib.pyplot as plt

img = cv2.imread('./output/results.png')
plt.imshow(img, cmap='gray')
plt.show()
print(et_count)
Example #7
def main():
    # define output folder
    output_dir = './saved_models/'
    log_dir = './mae_mse/'
    checkpoint_dir = './checkpoint/'

    train_path = '/home/jake/Desktop/Projects/Python/dataset/SH_B/cooked/train/images'
    train_gt_path = '/home/jake/Desktop/Projects/Python/dataset/SH_B/cooked/train/ground_truth'
    val_path = '/home/jake/Desktop/Projects/Python/dataset/SH_B/cooked/val/images'
    val_gt_path = '/home/jake/Desktop/Projects/Python/dataset/SH_B/cooked/val/ground_truth'

    # last checkpoint
    checkpointfile = os.path.join(checkpoint_dir, 'checkpoint.94.pth.tar')

    # some description
    method = 'mcnn'
    dataset_name = 'SH_B'

    # log file
    f_train_loss = open(os.path.join(log_dir, "train_loss.csv"), "a+")
    f_val_loss = open(os.path.join(log_dir, "val_loss.csv"), "a+")

    # Training configuration
    start_epoch = 0
    end_epoch = 97
    lr = 0.00001
    # momentum = 0.9
    disp_interval = 1000
    # log_interval = 250

    # Flag
    CONTINUE_TRAIN = True
    # Tensorboard  config

    # use_tensorboard = False
    # save_exp_name = method + '_' + dataset_name + '_' + 'v1'
    # remove_all_log = False   # remove all historical experiments in TensorBoard
    # exp_name = None # the previous experiment name in TensorBoard

    # -----------------------------------------------------------------------------------------
    rand_seed = 64678
    if rand_seed is not None:
        np.random.seed(rand_seed)
        torch.manual_seed(rand_seed)
        torch.cuda.manual_seed(rand_seed)

    # Define network
    net = CrowdCounter()
    network.weights_normal_init(net, dev=0.01)
    # net.cuda()
    net.train()
    # params = list(net.parameters())
    optimizer = torch.optim.Adam(
        filter(lambda p: p.requires_grad, net.parameters()), lr=lr)

    if not os.path.exists(output_dir):
        os.mkdir(output_dir)

    # # tensorboad
    # use_tensorboard = use_tensorboard and CrayonClient is not None
    # if use_tensorboard:
    #     cc = CrayonClient(hostname='127.0.0.1')
    #     if remove_all_log:
    #         cc.remove_all_experiments()
    #     if exp_name is None:
    #         exp_name = save_exp_name
    #         exp = cc.create_experiment(exp_name)
    #     else:
    #         exp = cc.open_experiment(exp_name)

    # training param

    if CONTINUE_TRAIN:
        net, optimizer, start_epoch = utils.load_checkpoint(
            net, optimizer, filename=checkpointfile)

    train_loss = 0
    step_cnt = 0
    re_cnt = False
    t = Timer()
    t.tic()

    # Load data
    data_loader = ImageDataLoader(
        train_path, train_gt_path, shuffle=True, gt_downsample=True, pre_load=True)
    data_loader_val = ImageDataLoader(
        val_path, val_gt_path, shuffle=False, gt_downsample=True, pre_load=True)
    best_mae = sys.maxsize
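    # start with an impossibly large best MAE so the first validation result becomes the new best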

    # Start training

    for this_epoch in range(start_epoch, end_epoch-1):
        step = -1
        train_loss = 0
        for blob in data_loader:
            step += 1
            img_data = blob['data']
            gt_data = blob['gt_density']
            et_data = net(img_data, gt_data)
            loss = net.loss
            train_loss += loss.data
            step_cnt += 1
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if step % disp_interval == 0:
                duration = t.toc(average=False)
                fps = step_cnt / duration
                gt_count = np.sum(gt_data)
                et_data = et_data.data.cpu().numpy()
                et_count = np.sum(et_data)
                utils.save_results(img_data, gt_data, et_data, output_dir,
                                   fname="{}.{}.png".format(this_epoch, step))
                log_text = 'epoch: %4d, step %4d, Time: %.4fs, gt_cnt: %4.1f, et_cnt: %4.1f' % (this_epoch,
                                                                                                step, 1./fps, gt_count, et_count)
                log_print(log_text, color='green', attrs=['bold'])
                re_cnt = True

            if re_cnt:
                t.tic()
                re_cnt = False

        # Save checkpoint
        state = {'epoch': this_epoch, 'state_dict': net.state_dict(),
                 'optimizer': optimizer.state_dict()}
        cp_filename = "checkpoint.{}.pth.tar".format(this_epoch)
        torch.save(state, os.path.join(checkpoint_dir, cp_filename))
# ========================== END 1 EPOCH==================================================================================
        train_mae, train_mse = evaluate_network(net, data_loader)
        f_train_loss.write("{},{}\n".format(train_mae, train_mse))
        log_text = 'TRAINING - EPOCH: %d, MAE: %.1f, MSE: %0.1f' % (
            this_epoch, train_mae, train_mse)
        log_print(log_text, color='green', attrs=['bold'])
# =====================================================VALIDATION=========================================================
        # calculate error on the validation dataset
        val_mae, val_mse = evaluate_network(net, data_loader_val)
        f_val_loss.write("{},{}\n".format(val_mae, val_mse))
        log_text = 'VALIDATION - EPOCH: %d, MAE: %.1f, MSE: %0.1f' % (
            this_epoch, val_mae, val_mse)
        log_print(log_text, color='green', attrs=['bold'])
        # SAVE model
        is_save = False
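        # save the model when validation MAE improves; on an exact MAE tie, fall back to comparing MSE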
        if val_mae <= best_mae:
            if val_mae < best_mae:
                is_save = True
                best_mae = val_mae
                best_mse = val_mse
            else:
                if val_mse < best_mse:
                    is_save = True
                    best_mse = val_mse

        if is_save:
            save_name = os.path.join(output_dir, '{}_{}_{}.h5'.format(
                method, dataset_name, this_epoch))
            network.save_net(save_name, net)
            best_model = '{}_{}_{}.h5'.format(method, dataset_name, this_epoch)
            log_text = 'BEST MAE: %0.1f, BEST MSE: %0.1f, BEST MODEL: %s' % (
                best_mae, best_mse, best_model)
            log_print(log_text, color='green', attrs=['bold'])

        # if use_tensorboard:
        #     exp.add_scalar_value('MAE', mae, step=epoch)
        #     exp.add_scalar_value('MSE', mse, step=epoch)
        #     exp.add_scalar_value('train_loss', train_loss /
        #                          data_loader.get_num_samples(), step=epoch)

    f_train_loss.close()
    f_val_loss.close()
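The training loop above calls evaluate_network, which is not included in the snippet. Below is a minimal sketch of such a helper, assuming it mirrors the test loop shown in Example #8; the name, signature, and use of get_num_samples() are taken from the surrounding snippets, and the repository's actual implementation may differ.

def evaluate_network(net, data_loader):
    # Hypothetical evaluation helper: mean absolute error and root mean squared
    # error of the estimated counts over one pass of the loader.
    # Assumes numpy is imported as np, as in the snippets above.
    net.eval()
    mae, mse = 0.0, 0.0
    for blob in data_loader:
        im_data = blob['data']
        gt_data = blob['gt_density']
        density_map = net(im_data, gt_data)
        density_map = density_map.data.cpu().numpy()
        gt_count = np.sum(gt_data)
        et_count = np.sum(density_map)
        mae += abs(gt_count - et_count)
        mse += (gt_count - et_count) ** 2
    mae = mae / data_loader.get_num_samples()
    mse = np.sqrt(mse / data_loader.get_num_samples())
    net.train()
    return mae, mse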
Example #8
#load test data
data_loader = ImageDataLoader(data_path,
                              gt_path,
                              shuffle=False,
                              gt_downsample=True,
                              pre_load=True)

for blob in data_loader:
    im_data = blob['data']
    gt_data = blob['gt_density']
    density_map = net(im_data, gt_data)
    density_map = density_map.data.cpu().numpy()
    gt_count = np.sum(gt_data)
    et_count = np.sum(density_map)
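    # accumulate per-image absolute and squared errors between ground-truth and estimated counts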
    mae += abs(gt_count - et_count)
    mse += ((gt_count - et_count) * (gt_count - et_count))
    print("Ground truth: {:0.2f}, Estimate: {:0.2f}".format(
        gt_count, et_count))
    if vis:
        utils.display_results(im_data, gt_data, density_map)
    if save_output:
        utils.save_results(im_data, gt_data, density_map, output_dir,
                           'output_' + blob['fname'].split('.')[0] + '.png')

mae = mae / data_loader.get_num_samples()
mse = np.sqrt(mse / data_loader.get_num_samples())
print('\nMAE: %0.2f, MSE: %0.2f' % (mae, mse))

# f = open(file_results, 'w')
# f.write('MAE: %0.2f, MSE: %0.2f' % (mae,mse))
# f.close()