Example #1
def train_net():
    '''Main training function'''
    # Set up the model and the solver
    NetClass = load_model(cfg.CONST.NETWORK_CLASS)

    # print('Network definition: \n')
    # print(inspect.getsource(NetClass.network_definition))
    net = NetClass()

    # Check that single view reconstruction net is not used for multi view
    # reconstruction.
    if net.is_x_tensor4 and cfg.CONST.N_VIEWS > 1:
        raise ValueError('Do not set config.CONST.N_VIEWS > 1 when using a '
                         'single-view reconstruction network')

    # Prefetching data processes
    #
    # Create worker and data queue for data processing. For training data, use
    # multiple processes to speed up the loading. For validation data, use 1
    # since the queue will be popped every TRAIN.NUM_VALIDATION_ITERATIONS.
    global train_queue, val_queue, train_processes, val_processes
    train_queue = Queue(cfg.QUEUE_SIZE)
    val_queue = Queue(cfg.QUEUE_SIZE)

    train_processes = make_data_processes(
        train_queue,
        category_model_id_pair(dataset_portion=cfg.TRAIN.DATASET_PORTION),
        cfg.TRAIN.NUM_WORKER,
        repeat=True)
    val_processes = make_data_processes(
        val_queue,
        category_model_id_pair(dataset_portion=cfg.TEST.DATASET_PORTION),
        1,
        repeat=True,
        train=False)

    import torch.cuda
    if torch.cuda.is_available():
        net.cuda()

    # print the queue
    # print(train_queue)
    # print(val_queue)

    # Generate the solver
    solver = Solver(net)

    # Train the network
    solver.train(train_queue, val_queue)

    # Cleanup the processes and the queue.
    kill_processes(train_queue, train_processes)
    kill_processes(val_queue, val_processes)
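The bodies of make_data_processes and kill_processes are not included in this listing. As a rough, hedged sketch of the producer/consumer pattern they implement (the DataProcess class, its loading stub, and the omission of the train flag are all assumptions, not the project's actual API):

from multiprocessing import Process, Queue

class DataProcess(Process):
    """Hypothetical minimal prefetching worker: loads batches onto a queue."""

    def __init__(self, queue, data_paths, repeat=True):
        super().__init__()
        self.queue = queue
        self.data_paths = data_paths
        self.repeat = repeat

    def run(self):
        while True:
            for path in self.data_paths:
                # Stand-in for real image/voxel loading code.
                self.queue.put(path)  # blocks when the queue is full
            if not self.repeat:
                break

def make_data_processes(queue, data_paths, num_workers, repeat=True):
    # Start num_workers producer processes feeding the shared queue.
    processes = [DataProcess(queue, data_paths, repeat)
                 for _ in range(num_workers)]
    for p in processes:
        p.start()
    return processes

def kill_processes(queue, processes):
    # Drain the queue so producers blocked on put() can finish, then stop them.
    while not queue.empty():
        queue.get_nowait()
    for p in processes:
        p.terminate()
        p.join()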
Example #2
def main():
    args = [(category, model_id)
            for category, model_id in category_model_id_pair(
                dataset_portion=[0, 1])]

    with Pool(processes=NUM_PROCESSES) as pool:
        pool.starmap(evaluate_grasps, args)
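Pool.starmap unpacks each tuple in args into positional arguments, so each call becomes evaluate_grasps(category, model_id). A tiny self-contained demonstration of the same pattern (names here are illustrative only):

from multiprocessing import Pool

def evaluate(category, model_id):
    return '%s/%s' % (category, model_id)

if __name__ == '__main__':
    pairs = [('chair', 'a1'), ('table', 'b2')]
    with Pool(processes=2) as pool:
        print(pool.starmap(evaluate, pairs))  # ['chair/a1', 'table/b2']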
Example #3
def test_net():
    ''' Evaluate the network '''
    # Make result directory and the result file.
    result_dir = os.path.join(cfg.DIR.OUT_PATH, cfg.TEST.EXP_NAME)
    if not os.path.exists(result_dir):
        os.makedirs(result_dir)
    result_fn = os.path.join(result_dir, 'result.mat')

    print("Exp file will be written to: " + result_fn)

    # Make a network and load weights
    NetworkClass = load_model(cfg.CONST.NETWORK_CLASS)
    print('Network definition: \n')
    print(inspect.getsource(NetworkClass.network_definition))
    net = NetworkClass(compute_grad=False)
    net.load(cfg.CONST.WEIGHTS)
    solver = Solver(net)

    # set constants
    batch_size = cfg.CONST.BATCH_SIZE

    # set up testing data process. We make only one prefetching process. The
    # process will return one batch at a time.
    queue = Queue(cfg.QUEUE_SIZE)
    data_pair = category_model_id_pair(
        dataset_portion=cfg.TEST.DATASET_PORTION)
    processes = make_data_processes(queue,
                                    data_pair,
                                    1,
                                    repeat=False,
                                    train=False)
    num_data = len(processes[0].data_paths)
    num_batch = num_data // batch_size

    # prepare result container
    results = {'cost': np.zeros(num_batch)}
    for thresh in cfg.TEST.VOXEL_THRESH:
        results[str(thresh)] = np.zeros((num_batch, batch_size, 5))

    # Get all test data
    batch_idx = 0
    for batch_img, batch_voxel in get_while_running(processes[0], queue):
        if batch_idx == num_batch:
            break

        pred, loss, activations = solver.test_output(batch_img, batch_voxel)
        print('%d/%d, cost is: %f' % (batch_idx, num_batch, loss))

        for i, thresh in enumerate(cfg.TEST.VOXEL_THRESH):
            for j in range(batch_size):
                r = evaluate_voxel_prediction(pred[j, ...],
                                              batch_voxel[j, ...], thresh)
                results[str(thresh)][batch_idx, j, :] = r

        # record result for the batch
        results['cost'][batch_idx] = float(loss)
        batch_idx += 1

    print('Total loss: %f' % np.mean(results['cost']))
    sio.savemat(result_fn, results)
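evaluate_voxel_prediction is not shown in this listing; judging from the (num_batch, batch_size, 5) result arrays, it returns five numbers per sample. A plausible sketch, assuming channel 1 of the prediction holds occupancy probabilities (the exact statistics are an assumption):

import numpy as np

def evaluate_voxel_prediction(pred, gt, thresh):
    """Hypothetical sketch: binarize the occupancy channel and count errors."""
    occupied = pred[:, 1, :, :] >= thresh      # assumed layout: channel 1 = occupied
    gt_occ = gt[:, 1, :, :].astype(bool)
    diff = np.logical_xor(occupied, gt_occ).sum()   # voxels that disagree
    inter = np.logical_and(occupied, gt_occ).sum()  # intersection
    union = np.logical_or(occupied, gt_occ).sum()   # union (IoU = inter / union)
    fp = np.logical_and(occupied, ~gt_occ).sum()    # false positives
    fn = np.logical_and(~occupied, gt_occ).sum()    # false negatives
    return np.array([diff, inter, union, fp, fn])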
Example #4
def train_net():

    # Set up the model and the solver
    my_net = My_ResidualGRUNet()

    # Generate the solver
    solver = Solver(my_net)

    # Declare the global queue and process handles
    global train_queue, validation_queue, train_processes, val_processes

    # Initialize the queues; 15 is the maximum number of minibatches that
    # can be held in a data queue at once.
    train_queue = Queue(15)
    validation_queue = Queue(15)

    # Train on 80 percent of the data
    train_dataset_portion = [0, 0.8]

    # Validate on 20 percent of the data
    test_dataset_portion = [0.8, 1]

    # Establish the training processes
    train_processes = make_data_processes(
        train_queue,
        category_model_id_pair(dataset_portion=train_dataset_portion),
        1,
        repeat=True)

    # Establish the validation processes
    val_processes = make_data_processes(
        validation_queue,
        category_model_id_pair(dataset_portion=test_dataset_portion),
        1,
        repeat=True,
        train=False)

    # Train the network
    solver.train(train_queue, validation_queue)

    # Cleanup the processes and the queue.
    kill_processes(train_queue, train_processes)
    kill_processes(validation_queue, val_processes)
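dataset_portion expresses a fractional window over the model list, which is how the 80/20 split above works. A minimal sketch of the idea (the real category_model_id_pair also reads the dataset's category/model directory structure; take_portion is a hypothetical helper):

def take_portion(pairs, portion):
    """Keep the fractional slice [start, end) of a list of (category, model_id)."""
    start, end = portion
    n = len(pairs)
    return pairs[int(n * start):int(n * end)]

pairs = [('chair', 'model_%d' % i) for i in range(10)]
train = take_portion(pairs, [0, 0.8])  # first 8 models
val = take_portion(pairs, [0.8, 1])    # last 2 models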
Example #5
def test_process():
    from multiprocessing import Queue
    from lib.config import cfg
    from lib.data_io import category_model_id_pair
    # Assumption: the process helpers live alongside the other lib modules.
    from lib.data_process import ReconstructionDataProcess, kill_processes

    cfg.TRAIN.PAD_X = 10
    cfg.TRAIN.PAD_Y = 10

    data_queue = Queue(2)
    category_model_pair = category_model_id_pair(dataset_portion=[0, 0.1])

    data_process = ReconstructionDataProcess(data_queue, category_model_pair)
    data_process.start()
    batch_img, batch_voxel = data_queue.get()

    kill_processes(data_queue, [data_process])
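data_queue.get() blocks forever if the worker never produces a batch, which can hang a test. A hedged variant of the same line, reusing data_queue from the test above (the 60-second timeout is an arbitrary choice):

from queue import Empty  # multiprocessing.Queue raises queue.Empty on timeout

try:
    batch_img, batch_voxel = data_queue.get(timeout=60)
except Empty:
    raise RuntimeError('data process produced no batch within 60 s')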
Example #6
def demo(args):
    ''' Evaluate the network '''

    # Make a network and load weights
    NetworkClass = load_model(cfg.CONST.NETWORK_CLASS)
    print('Network definition: \n')
    print(inspect.getsource(NetworkClass.network_definition))
    net = NetworkClass(compute_grad=False)
    net.load(cfg.CONST.WEIGHTS)
    solver = Solver(net)

    # set up testing data process. We make only one prefetching process. The
    # process will return one batch at a time.
    queue = Queue(cfg.QUEUE_SIZE)
    data_pair = category_model_id_pair(dataset_portion=cfg.TEST.DATASET_PORTION)
    processes = make_data_processes(queue, data_pair, 1, repeat=False, train=False)
    num_data = len(processes[0].data_paths)
    num_batch = num_data // args.batch_size

    # Get all test data
    batch_idx = 0
    for batch_img, batch_voxel in get_while_running(processes[0], queue):
        if batch_idx == num_batch:
            break

        pred, loss, activations = solver.test_output(batch_img, batch_voxel)

        if batch_idx < args.exportNum:
            # Save the prediction to an OBJ file (mesh file).
            print('saving {}/{}'.format(batch_idx, args.exportNum - 1))
            voxel2obj('out/prediction_{}b_{}.obj'.format(args.batch_size, batch_idx),
                      pred[0, :, 1, :, :] > cfg.TEST.VOXEL_THRESH)
        else:
            break

        batch_idx += 1

    if args.file:
        # Use meshlab or other mesh viewers to visualize the prediction.
        # For Ubuntu>=14.04, you can install meshlab using
        # `sudo apt-get install meshlab`
        if cmd_exists('meshlab'):
            call(['meshlab', 'obj/{}.obj'.format(args.file)])
        else:
            print('Meshlab not found: please use a mesh viewer of your choice to view %s' %
                  args.file)
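cmd_exists is not defined in this listing. A common way to implement such a check is shutil.which; this stand-in is an assumption, not necessarily the project's implementation:

import shutil

def cmd_exists(cmd):
    """Return True if `cmd` is found on the PATH (hypothetical stand-in)."""
    return shutil.which(cmd) is not None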
Example #7
def demo(args):
    ''' Evaluate the network '''

    # Make a network and load weights
    NetworkClass = load_model(cfg.CONST.NETWORK_CLASS)
    print('Network definition: \n')
    print(inspect.getsource(NetworkClass.network_definition))
    net = NetworkClass(compute_grad=False)
    net.load(cfg.CONST.WEIGHTS)
    solver = Solver(net)

    # set up testing data process. We make only one prefetching process. The
    # process will return one batch at a time.
    queue = Queue(cfg.QUEUE_SIZE)
    data_pair = category_model_id_pair(
        dataset_portion=cfg.TEST.DATASET_PORTION)
    processes = make_data_processes(queue,
                                    data_pair,
                                    1,
                                    repeat=False,
                                    train=False)
    num_data = len(processes[0].data_paths)
    num_batch = num_data // args.batch_size

    # Get all test data
    batch_idx = 0
    for batch_img, batch_voxel in get_while_running(processes[0], queue):
        if batch_idx == num_batch:
            break

        pred, loss, activations = solver.test_output(batch_img, batch_voxel)

        if batch_idx < args.exportNum:
            # Save the prediction to an OBJ file (mesh file).
            print('saving {}/{}'.format(batch_idx, args.exportNum - 1))
            # voxel2obj('out/prediction_{}b_{}.obj'.format(args.batch_size, batch_idx),
            #           pred[0, :, 1, :, :] > cfg.TEST.VOXEL_THRESH)
        else:
            break

        batch_idx += 1
Example #8
def main():
    cfg.TRAIN.CROP_X = 10
    cfg.TRAIN.CROP_Y = 10

    NUM_IMG = cfg.TRAIN.NUM_RENDERING
    OUTPUT_DIR = 'rendering'  # Subfolder per model to save prerendered images.

    # Multi-process pre-rendering.
    # Blender tends to slow down after rendering a few hundred models, so
    # restart the whole worker pool every BATCH_SIZE models to keep it fast.
    NUM_PROCESS = 6
    BATCH_SIZE = 200
    args = [(category, model_id, NUM_IMG)
            for category, model_id in category_model_id_pair(
                dataset_portion=[0, 1])]

    # Slice args into chunks of BATCH_SIZE; unlike `len(args) // BATCH_SIZE + 1`
    # iterations, this never produces an empty trailing batch.
    args_batches = [
        args[i:i + BATCH_SIZE] for i in range(0, len(args), BATCH_SIZE)
    ]

    for args_batch in args_batches:
        with Pool(processes=NUM_PROCESS) as pool:
            pool.starmap(render_model, args_batch)
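The slice-based batching above never yields an empty trailing chunk, unlike the original `len(args) // BATCH_SIZE + 1` form, which produced one whenever len(args) was an exact multiple of BATCH_SIZE. A quick self-contained check:

args = list(range(5))
BATCH_SIZE = 2
batches = [args[i:i + BATCH_SIZE] for i in range(0, len(args), BATCH_SIZE)]
print(batches)  # [[0, 1], [2, 3], [4]]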
Example #9
def test_net():
    ''' Evaluate the network '''
    # Make result directory and the result file.
    result_dir = os.path.join(cfg.DIR.OUT_PATH, cfg.TEST.EXP_NAME)
    if not os.path.exists(result_dir):
        os.makedirs(result_dir)
    result_fn = os.path.join(result_dir, 'result.mat')

    print("Exp file will be written to: " + result_fn)

    # Make a network and load weights
    NetworkClass = load_model(cfg.CONST.NETWORK_CLASS)

    #print('Network definition: \n')
    #print(inspect.getsource(NetworkClass.network_definition))

    net = NetworkClass()
    net.cuda()

    solver = Solver(net)
    solver.load(cfg.CONST.WEIGHTS)

    # set constants
    batch_size = cfg.CONST.BATCH_SIZE

    # set up testing data process. We make only one prefetching process. The
    # process will return one batch at a time.
    queue = Queue(cfg.QUEUE_SIZE)
    data_pair = category_model_id_pair(dataset_portion=cfg.TEST.DATASET_PORTION)
    processes = make_data_processes(queue, data_pair, 1, repeat=False, train=False)

    num_data = len(processes[0].data_paths)
    num_batch = num_data // batch_size

    # prepare result container
    results = {'cost': np.zeros(num_batch),
               'mAP': np.zeros((num_batch, batch_size))}
    # Save results for various thresholds
    for thresh in cfg.TEST.VOXEL_THRESH:
        results[str(thresh)] = np.zeros((num_batch, batch_size, 5))

    # Get all test data
    batch_idx = 0
    for batch_img, batch_voxel in get_while_running(processes[0], queue):
        if batch_idx == num_batch:
            break

        # activations is a list of torch.cuda.FloatTensor
        pred, loss, activations = solver.test_output(batch_img, batch_voxel)

        # Convert the PyTorch tensors to NumPy arrays
        pred = pred.data.cpu().numpy()
        loss = loss.data.cpu().numpy()

        for j in range(batch_size):
            # Save IoU per thresh
            for i, thresh in enumerate(cfg.TEST.VOXEL_THRESH):
                r = evaluate_voxel_prediction(pred[j, ...], batch_voxel[j, ...], thresh)
                results[str(thresh)][batch_idx, j, :] = r

            # Compute AP
            precision = sklearn.metrics.average_precision_score(
                batch_voxel[j, 1].flatten(), pred[j, 1].flatten())

            results['mAP'][batch_idx, j] = precision

        # record result for the batch
        results['cost'][batch_idx] = float(loss)
        print('%d/%d, costs: %f, mAP: %f' %
              (batch_idx, num_batch, loss, np.mean(results['mAP'][batch_idx])))
        batch_idx += 1

    print('Total loss: %f' % np.mean(results['cost']))
    print('Total mAP: %f' % np.mean(results['mAP']))

    sio.savemat(result_fn, results)
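sklearn.metrics.average_precision_score(y_true, y_score) expects binary ground-truth labels and continuous scores, so flattening the occupancy channel treats every voxel as one binary prediction. A tiny self-contained check (values taken from scikit-learn's documentation example):

import numpy as np
from sklearn.metrics import average_precision_score

y_true = np.array([0, 0, 1, 1])
y_score = np.array([0.1, 0.4, 0.35, 0.8])
print(average_precision_score(y_true, y_score))  # ~0.83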
Example #10
def main():
    for category, model_id in category_model_id_pair(dataset_portion=[0, 1]):
        evaluate_grasps(category, model_id)
Example #11
def __init__(self, dataset_portion=[]):
    self.samples = category_model_id_pair(dataset_portion)
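A mutable default such as dataset_portion=[] is created once and shared across calls in Python; it is harmless here because the list is only read, but a None default sidesteps the pitfall. A hedged variant (the enclosing Dataset class name is illustrative only):

from lib.data_io import category_model_id_pair

class Dataset:
    def __init__(self, dataset_portion=None):
        # [] matches the original behavior; None avoids the shared-default trap.
        portion = dataset_portion if dataset_portion is not None else []
        self.samples = category_model_id_pair(portion)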