Example 1
def test(config):
    config['num_threads'] = 1  # only <num_threads = 1> is supported in testing_usr mode
    config['flip'] = False  # image flipping is not allowed at test time
    config['add_colorjit'] = False  # color jittering is not allowed at test time

    dataset = create_dataset(config)
    model = create_model(config)
    model.setup(config)

    result_root_path = os.path.join(config['checkpoints_dir'], config['name'],
                                    config['results_dir'])
    util.mkdir(result_root_path)
    print(" create testing_usr folder: " + result_root_path)

    # switch the model to evaluation mode for testing_usr
    model.eval()

    for i, data in enumerate(dataset):
        model.set_input(data)  # feed the test batch to the model
        model.test()  # run the forward pass

        for k in range(len(model.test_result)):
            img = util.tensor2im(model.test_result[k][1])
            # note: every k writes to the same path, so the last result wins
            img_path = os.path.join(result_root_path, data['PATH'][0])
            util.save_image(img, img_path)

        print("Testing forward-- complete:" + str(i + 1) + "  total:" +
              str(dataset.__len__()))
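
The loop above reads model.test_result[k][1], which suggests each entry is a (name, tensor) pair. A minimal sketch of iterating such a structure, assuming that layout (the entry names and shapes here are placeholders, not taken from the project):

import torch

# Hypothetical layout inferred from the indexing above:
# test_result is assumed to be a list of (name, tensor) pairs.
test_result = [('output', torch.zeros(1, 3, 64, 64)),
               ('heatmap', torch.zeros(1, 1, 64, 64))]

for k in range(len(test_result)):
    name, tensor = test_result[k]
    print(name, tuple(tensor.shape))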
Example 2
def test(config):
    config['num_threads'] = 1  # only <num_threads = 1> is supported in testing_usr mode

    dataset = create_dataset(config)
    model = create_model(config)
    model.setup(config)

    result_root_path = os.path.join(config['checkpoints_dir'], config['name'],
                                    config['results_dir'],
                                    'epoch_' + str(config['test_epoch']))
    tools.mkdir(result_root_path)
    print("Created testing_usr folder: " + result_root_path)

    # switch the model to evaluation mode for testing_usr
    model.eval()

    for i, data in enumerate(dataset):
        model.set_input(data)  # feed the test batch to the model
        model.test()  # run the forward pass

        datapoint_offset = model.test_result[0][1]
        datapoint_offset = datapoint_offset.squeeze(0).detach().cpu().numpy()
        # map the background from [-1, 1] back to [0, 255] pixel values
        datapoint_bg = (model.test_result[1][1].squeeze(0).permute(1, 2, 0)
                        .detach().cpu().numpy() + 1.0) * 0.5 * 255.
        index = data["PATH"].cpu().numpy()[0]
        plot_motion.plot_motion_field(motion_vector=datapoint_offset * 5.,
                                      savepath=os.path.join(result_root_path, str(index) + ".jpg"),
                                      bg=datapoint_bg.astype(int),  # np.int was removed in NumPy 1.24
                                      limits=0,
                                      plot_interval=8,
                                      plot_size=10)

        print("Testing forward-- complete:" + str(i + 1) + "  total:" + str(dataset.__len__()))

    print("Testing result have been saved!")
Example 3
def train(config):
    dataset = create_dataset(config)
    model = create_model(config)
    model.setup(config)
    dataset_size = len(dataset)  # get the size of the dataset
    print('The number of training images = %d' % dataset_size)
    visualizer = Visualizer(config)  # create a visualizer to show/save images

    total_iters = 0  # total number of training iterations
    t_data = 0

    # resume training from a previously saved model
    if int(config['resume_epoch']) > 0:
        print("\n resume training from epoch " + str(int(config['resume_epoch'])) + " ...")
        model.resume_scheduler(int(config['resume_epoch']))
        model.load_networks(config['resume_epoch'])
        model.load_optimizers(config['resume_epoch'])

    # outer loop over epochs; the model is saved via the <epoch_count> and <epoch_count> + <save_latest_freq> options
    for epoch in range(int(config['resume_epoch']) + 1, int(config['epoch']) + 1):
        epoch_start_time = time.time()  # note the starting time for current epoch
        iter_data_time = time.time()  # note the starting time for data loading
        epoch_iter = 0  # iteration times for current epoch, reset to 0 for each epoch

        # inner loop over the dataset within a single epoch
        for i, data in enumerate(dataset):
            iter_start_time = time.time()  # note the starting time for the current iteration
            if total_iters % int(config['print_freq']) == 0:  # record the data-loading time every <print_freq> iterations
                t_data = iter_start_time - iter_data_time
            visualizer.reset()
            total_iters = total_iters + int(config['train_batch_size'])
            epoch_iter = epoch_iter + int(config['train_batch_size'])
            model.set_input(data)  # feed the loaded images to the model
            model.optimize_parameters()  # compute the loss and gradients, then update the model parameters

            if total_iters % int(config['display_freq']) == 0:  # show running results in visdom every <display_freq> iterations
                save_result = total_iters % int(config['update_html_freq']) == 0  # save running results to html every <update_html_freq> iterations
                visualizer.display_current_results(model.get_current_visuals(), epoch, save_result)

            if total_iters % int(config['print_freq']) == 0:  # print/save the training loss to the console every <print_freq> iterations
                losses = model.get_current_losses()
                t_comp = (time.time() - iter_start_time) / int(config['train_batch_size'])
                visualizer.print_current_losses(epoch, epoch_iter, losses, t_comp, t_data)
                if int(config['display_id']) > 0:
                    visualizer.plot_current_losses(epoch, float(epoch_iter) / dataset_size, losses)

        if epoch % int(config['save_epoch_freq']) == 0:  # save the model every <save_epoch_freq> epochs
            print('saving the model at the end of epoch %d, iters %d' % (epoch, total_iters))
            model.save_networks(epoch)
            model.save_optimizers(epoch)

        print('End of epoch %d / %d \t Time Taken: %d sec' % (epoch, int(config['epoch']), time.time() - epoch_start_time))

        # update learning rate after each epoch
        model.update_learning_rate()
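
For reference, a sketch of the config keys this training loop reads directly. The key names are taken from the code above; the values are illustrative placeholders, and create_dataset, create_model, and Visualizer will consume additional keys not shown here.

# Illustrative values only; every key below is read by train() above.
config = {
    'resume_epoch': 0,         # > 0 resumes from a saved checkpoint
    'epoch': 200,              # last epoch to train (inclusive)
    'train_batch_size': 8,
    'print_freq': 100,         # console loss logging interval (iterations)
    'display_freq': 400,       # visdom display interval (iterations)
    'update_html_freq': 1000,  # html snapshot interval (iterations)
    'display_id': 1,           # > 0 enables loss plotting
    'save_epoch_freq': 5,      # checkpoint interval (epochs)
}
train(config)  # plus whatever keys the dataset, model, and visualizer require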
Example 4
def test(config):
    config['num_threads'] = 1  # only <num_threads = 1> is supported in testing_usr mode
    config['flip'] = False  # image flipping is not allowed at test time
    config['status'] = 'test'
    config['crop_scale'] = 1.0

    dataset = create_dataset(config)
    model = create_model(config)
    model.setup(config)

    result_root_path = os.path.join(config['checkpoints_dir'], config['name'],
                                    'evaluation')
    util.mkdir(result_root_path)
    util.mkdir(os.path.join(result_root_path, 'prediction_distance'))
    util.mkdir(os.path.join(result_root_path, 'prediction_heatmap'))
    print(" create evaluate folder: " + result_root_path)

    # switch the model to evaluation mode for testing_usr
    model.eval()

    save_npy = np.zeros(shape=(len(dataset) + 1, 2), dtype=float)  # np.float was removed in NumPy 1.24
    save_npy[0][0], save_npy[0][1] = -1, -1  # sentinel row; real indices start at 1

    for i, data in enumerate(dataset):
        model.set_input(data)  # feed the test batch to the model
        model.test()  # run the forward pass

        datapoints = model.test_result[0][1].detach().cpu().numpy()
        index = data["PATH"].cpu().numpy()[0]
        save_npy[index][0], save_npy[index][1] = datapoints[0][0], datapoints[0][1]

        dist_img = model.test_result[1][1]
        util.save_image(
            util.tensor2im(dist_img),
            os.path.join(result_root_path, 'prediction_distance',
                         str(index) + ".png"))

        heatmap_img = model.test_result[2][1]
        util.save_image(
            util.tensor2im(heatmap_img),
            os.path.join(result_root_path, 'prediction_heatmap',
                         str(index) + ".png"))

        print("Evaluate forward-- complete:" + str(i + 1) + "  total:" +
              str(dataset.__len__()))

    np.save(os.path.join(result_root_path, 'regression.npy'), save_npy)
    l2_dist, easy_dist, hard_dist = evaluation.evaluate_detailed(save_npy)
    print("Testing npy result have been saved! Evaluation distance: " +
          str(round(l2_dist)) + "(" + str(round(easy_dist)) + "," +
          str(round(hard_dist)) + ")")
Example 5
import joblib  # sklearn.externals.joblib was removed in scikit-learn 0.23; use the standalone package

from modules import normalize_input, create_model

print("Enter train content file address:")
train_content_file_address = input()
print("Enter train label file address:")
train_label_file_address = input()

learn_data = normalize_input(train_content_file_address,
                             train_label_file_address)
model = create_model()
model.fit(learn_data.data, learn_data.target)
joblib.dump(model, "model.txt")

print("The model is saved in model.txt")
Example 6
import os
import pickle

import h5py
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler

# data_path, learning_rate, momentum, weight_decay, nesterov and
# create_model are assumed to be defined earlier in the source script.
path_val = None
t_size = 10

path_X = os.path.join(data_path, 'ExtractedFrames.h5')
path_Y = os.path.join(data_path, 'labels.pkl')

data = h5py.File(path_X, 'r')

X = data['X']

with open(path_Y, 'rb') as handler:
    Y = pickle.load(handler)

n_categories = len(Y[0].keys())

resnet_hybrid = create_model(5)

criterion = nn.MSELoss()
# criterion = criterion.cuda()  # uncomment to move the loss to the GPU
optimizer = optim.SGD(
    resnet_hybrid.parameters(),
    lr=learning_rate,
    momentum=momentum,
    weight_decay=weight_decay,
    nesterov=nesterov)
scheduler = lr_scheduler.ReduceLROnPlateau(
    optimizer, 'min', patience=20)

opt = type('', (), {})()  # create an empty namespace object
opt.arch = 'resnet-101'
opt.result_path = data_path
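
The snippet sets up the optimizer and a ReduceLROnPlateau scheduler but stops before the training loop. For completeness, a minimal sketch of how they are typically stepped, assuming create_model(5) returns a 5-output network that accepts 224x224 RGB images (the random tensors below are placeholders, not project data):

import torch

for epoch in range(3):
    inputs = torch.randn(4, 3, 224, 224)  # placeholder batch
    targets = torch.randn(4, 5)           # placeholder regression targets

    optimizer.zero_grad()
    loss = criterion(resnet_hybrid(inputs), targets)
    loss.backward()
    optimizer.step()

    scheduler.step(loss.item())  # ReduceLROnPlateau expects a metric value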