示例#1
0
def sample_batch(samples, is_training):
    """Assemble a random mini-batch of meshes for classification.

    Picks ``args.batch_size`` random samples, grows the persistent padding
    sizes stored on the function object (``sample_batch.num_vertices`` /
    ``sample_batch.num_faces``) to fit them, fills the module-level
    ``inputs``/``targets``/``mask`` buffers *in place*, and block-
    concatenates the per-sample sparse operators.

    Args:
        samples: sequence of dicts with keys 'V', 'F', 'label', 'L',
            'Di', 'DiA'. (Exact tensor shapes assumed from sibling
            samplers in this file — TODO confirm.)
        is_training: unused; kept for interface compatibility with callers.

    Returns:
        Tuple of Variables ``(inputs, targets, mask, laplacian, Di, DiA)``,
        moved to GPU when ``args.cuda`` is set.
    """
    indices = []
    for _ in range(args.batch_size):
        ind = np.random.randint(0, len(samples))
        # Padding sizes are function attributes so they only ever grow,
        # keeping batch shapes monotonically consistent across calls.
        sample_batch.num_vertices = max(sample_batch.num_vertices,
                                        samples[ind]['V'].size(0))
        sample_batch.num_faces = max(sample_batch.num_faces,
                                     samples[ind]['F'].size(0))
        indices.append(ind)

    # Module-level buffers are resized and zeroed in place (side effect
    # visible to the rest of the module).
    inputs.resize_(args.batch_size, sample_batch.num_vertices, 3)
    inputs.fill_(0)
    targets.resize_(args.batch_size)
    mask.resize_(args.batch_size, sample_batch.num_vertices, 1)
    mask.fill_(0)

    laplacian = []
    Di = []
    DiA = []

    for b, ind in enumerate(indices):
        num_vertices = samples[ind]['V'].size(0)

        inputs[b, :num_vertices] = samples[ind]['V']
        targets[b] = samples[ind]['label']
        mask[b, :num_vertices] = 1

        laplacian.append(samples[ind]['L'])
        Di.append(samples[ind]['Di'])
        DiA.append(samples[ind]['DiA'])

    laplacian = utils.sparse_cat(laplacian, sample_batch.num_vertices,
                                 sample_batch.num_vertices)
    # Dirac operators appear quaternionic: 4 components per vertex/face
    # (matches the 4x factors used throughout this file) — TODO confirm.
    Di = utils.sparse_cat(Di, 4 * sample_batch.num_faces,
                          4 * sample_batch.num_vertices)
    DiA = utils.sparse_cat(DiA, 4 * sample_batch.num_vertices,
                           4 * sample_batch.num_faces)

    # volatile=False is the Variable default; kept explicit to preserve
    # the original (deprecated) autograd API usage.
    volatile = False
    out = [Variable(t, volatile=volatile)
           for t in (inputs, targets, mask, laplacian, Di, DiA)]
    if args.cuda:
        out = [v.cuda() for v in out]
    return tuple(out)
示例#2
0
def sample_batch(samples):
    """Assemble a random CUDA mini-batch of meshes plus their flattened
    (z dropped) counterparts.

    Picks ``args.batch_size`` random samples, grows the persistent padding
    sizes stored on the function object, zero-pads vertex/face tensors to
    those sizes, and block-concatenates per-sample sparse operators.
    Dirac operators are built only when ``args.model == "dir"``.

    Args:
        samples: sequence of dicts with keys 'V', 'F', 'L', 'flat_L' and,
            for the "dir" model, 'Di', 'DiA', 'flat_Di', 'flat_DiA'.

    Returns:
        ``(inputs, flat_inputs, mask, laplacian, flat_laplacian, Di, DiA,
        flat_Di, flat_DiA, faces)`` — CUDA Variables except ``faces``
        (a CPU LongTensor); the four Dirac entries are None when the
        model is not "dir".
    """
    chosen = []
    for _ in range(args.batch_size):
        ind = np.random.randint(0, len(samples))
        # Function attributes only ever grow, so batch padding stays
        # monotonically consistent across calls.
        sample_batch.num_vertices = max(sample_batch.num_vertices,
                                        samples[ind]['V'].size(0))
        sample_batch.num_faces = max(sample_batch.num_faces,
                                     samples[ind]['F'].size(0))
        chosen.append(ind)

    nv, nf = sample_batch.num_vertices, sample_batch.num_faces
    inputs = torch.zeros(args.batch_size, nv, 3)
    mask = torch.zeros(args.batch_size, nv, 1)
    flat_inputs = torch.zeros(args.batch_size, nv, 3)
    faces = torch.zeros(args.batch_size, nf, 3).long()

    laplacian, flat_laplacian = [], []
    Di, DiA, flat_Di, flat_DiA = [], [], [], []

    for b, ind in enumerate(chosen):
        sample = samples[ind]
        num_vertices = sample['V'].size(0)
        num_faces = sample['F'].size(0)

        inputs[b, :num_vertices] = sample['V']
        # The "flat" variant copies only x/y coordinates; z stays zero.
        flat_inputs[b, :num_vertices, 0:2] = sample['V'][:, 0:2]

        mask[b, :num_vertices] = 1
        faces[b, :num_faces] = sample['F']

        laplacian.append(sample['L'])
        flat_laplacian.append(sample['flat_L'])

        if args.model == "dir":
            Di.append(sample['Di'])
            DiA.append(sample['DiA'])
            flat_Di.append(sample['flat_Di'])
            flat_DiA.append(sample['flat_DiA'])

    laplacian = utils.sparse_cat(laplacian, nv, nv)
    flat_laplacian = utils.sparse_cat(flat_laplacian, nv, nv)

    dense = [Variable(t).cuda()
             for t in (inputs, flat_inputs, mask, laplacian, flat_laplacian)]

    if args.model == "dir":
        Di = utils.sparse_cat(Di, 4 * nf, 4 * nv)
        DiA = utils.sparse_cat(DiA, 4 * nv, 4 * nf)
        flat_Di = utils.sparse_cat(flat_Di, 4 * nf, 4 * nv)
        flat_DiA = utils.sparse_cat(flat_DiA, 4 * nv, 4 * nf)
        dirac = [Variable(t).cuda() for t in (Di, DiA, flat_Di, flat_DiA)]
    else:
        dirac = [None, None, None, None]

    return (dense[0], dense[1], dense[2], dense[3], dense[4],
            dirac[0], dirac[1], dirac[2], dirac[3], faces)
示例#3
0
def sample_batch(sequences, is_training, args):
    """Assemble a mini-batch with per-sample correspondence targets.

    Picks ``args.batch_size`` samples (random 80% split when training,
    the fixed ``sample_batch.test_ind`` entry otherwise), optionally
    applies random xz/xy rotations, zero-pads to the persistent maximum
    vertex/face counts stored on the function object, and builds the
    model-specific sparse operator.

    Bug fixed vs. the original: the second loop reused ``sequence_ind``
    left over from the *last* iteration of the first loop, so every batch
    slot was filled with the same sample. Loaded samples are now cached
    per index and looked up per slot.

    Args:
        sequences: list whose entries are sample dicts or path strings
            (lazily loaded via ``read_data``).
        is_training: choose random training indices vs. the test cursor.
        args: experiment configuration (batch_size, model, rotations, ...).

    Returns:
        ``(inputs, targets, mask, Operator, faces)`` where ``targets`` is
        a list of per-sample ``(G, label, label_inv)`` CUDA tuples and
        ``Operator`` depends on ``args.model`` ('dir' / 'amp' / 'lap').
    """
    indices = []
    offsets = []
    loaded = {}  # index -> loaded sample dict (avoids re-reading files)

    input_frames = 1
    output_frames = 40  # kept for parity with sibling samplers; unused here
    gc.collect()
    for b in range(args.batch_size):
        if is_training:
            sample_batch.test_ind = 0
            # Train on the first 80% of the data unless full_train is set.
            ind = np.random.randint(0, len(sequences) // 10 * 8)
            if args.full_train:
                ind = np.random.randint(0, len(sequences))
            offsets.append(0)
        else:
            ind = sample_batch.test_ind
            offsets.append(0)

        sample = sequences[ind]
        # Entries may be lazy (a path string); load on demand.
        if isinstance(sample, str):
            sample = read_data(sample, args)
        loaded[ind] = sample

        sample_batch.num_vertices = max(sample_batch.num_vertices,
                                        sample['V'].size(0))
        sample_batch.num_faces = max(sample_batch.num_faces,
                                     sample['F'].size(0))

        indices.append(ind)

    inputs = torch.zeros(args.batch_size, sample_batch.num_vertices,
                         3 * input_frames)
    mask = torch.zeros(args.batch_size, sample_batch.num_vertices, 1)
    faces = torch.zeros(args.batch_size, sample_batch.num_faces, 3).long()
    laplacian = []
    targets = [None] * args.batch_size

    Di = []
    DiA = []

    for b, (ind, offset) in enumerate(zip(indices, offsets)):
        sample = loaded[ind]  # per-slot sample (see bug note above)
        num_vertices = sample['V'].size(0)
        num_faces = sample['F'].size(0)

        inputV = sample['V']
        if args.xz_rotate:
            # Random rotation about the y axis (in the x-z plane).
            rotate_matrix_xz = lambda t: torch.Tensor([[
                np.cos(t), 0, np.sin(t)
            ], [0, 1, 0], [-np.sin(t), 0, np.cos(t)]])
            inputV = torch.matmul(
                inputV, rotate_matrix_xz(random.random() * 2 * np.pi))
        if args.xy_rotate:
            # Random rotation about the z axis (in the x-y plane).
            rotate_matrix_xy = lambda t: torch.Tensor([[
                np.cos(t), np.sin(t), 0
            ], [-np.sin(t), np.cos(t), 0], [0, 0, 1]])
            inputV = torch.matmul(
                inputV, rotate_matrix_xy(random.random() * 2 * np.pi))

        inputs[b, :num_vertices, :3] = inputV

        targets[b] = (sample['G'].cuda(), sample['label'].cuda(),
                      sample['label_inv'].cuda())

        mask[b, :num_vertices] = 1
        faces[b, :num_faces] = sample['F']

        # 'amp' and 'lap' both collect 'L' (the original had two identical
        # branches; a dead list-of-lists init before the loop was removed).
        if 'amp' in args.model or 'lap' in args.model:
            laplacian.append(sample['L'])

        if 'dir' in args.model:
            Di.append(sample['Di'])
            DiA.append(sample['DiA'])

    if 'amp' in args.model:
        # For 'amp', each sample's 'L' is presumably a per-layer list;
        # transpose to per-layer batches before concatenating — TODO confirm
        # against read_data.
        laplacian = [
            utils.sparse_cat(lap, sample_batch.num_vertices,
                             sample_batch.num_vertices).coalesce()
            for lap in map(list, zip(*laplacian))
        ]
    elif 'lap' in args.model:
        laplacian = utils.sparse_cat(laplacian, sample_batch.num_vertices,
                                     sample_batch.num_vertices).coalesce()

    Operator = None
    if 'dir' in args.model:
        # 4x factors: quaternionic Dirac operator components — TODO confirm.
        Di = utils.sparse_cat(Di, 4 * sample_batch.num_faces,
                              4 * sample_batch.num_vertices).coalesce()
        DiA = utils.sparse_cat(DiA, 4 * sample_batch.num_vertices,
                               4 * sample_batch.num_faces).coalesce()
        Operator = (Variable(Di).cuda(), Variable(DiA).cuda())
    elif 'amp' in args.model:
        Operator = [Variable(lap).cuda() for lap in laplacian]
    elif 'lap' in args.model:
        Operator = Variable(laplacian).cuda()

    return Variable(inputs).cuda(), targets, Variable(mask).cuda(), \
        Operator, faces
示例#4
0
def sample_batch(sequences, is_training, is_fixed=False):
    """Sample a temporal mini-batch of mesh sequences.

    Inputs are ``input_frames`` consecutive frames stacked along the
    channel axis; targets are the following ``output_frames`` frames.
    Everything is zero-padded to the persistent maximum vertex/face
    counts stored on the function object.

    Bug fixed vs. the original: the non-CUDA return path wrapped the raw
    (possibly empty) ``Di``/``DiA`` lists in ``Variable`` even when
    ``args.model != "dir"``; it now returns ``None, None`` exactly like
    the CUDA path.

    Args:
        sequences: list of frame sequences; each frame is a dict with
            keys 'V', 'F', 'L' and, for the "dir" model, 'Di', 'DiA'.
        is_training: sample from the first 80% with random offsets;
            otherwise iterate the held-out 20%.
        is_fixed: in eval mode, map batch slot b to a fixed sequence
            instead of advancing the global ``test_ind`` cursor.

    Returns:
        ``(inputs, targets, mask, laplacian, Di, DiA, faces)``. Di/DiA
        are None when ``args.model != "dir"``; Variables are moved to
        GPU when ``args.cuda`` is set; ``faces`` stays a CPU LongTensor.
    """
    global test_ind
    indices = []
    offsets = []

    input_frames = 2
    output_frames = 40

    for b in range(args.batch_size):
        if is_training:
            test_ind = 0
            # Train on the first 80% of the sequences.
            ind = np.random.randint(0, len(sequences) // 10 * 8)
            offsets.append(np.random.randint(
                0, len(sequences[ind]) - input_frames - output_frames))
        elif not is_fixed:
            # Walk the held-out 20% with the global cursor.
            ind = len(sequences) // 10 * 8 + test_ind
            offsets.append(
                test_ind % (len(sequences[ind]) - input_frames
                            - output_frames))
            test_ind += 1
        else:
            # Deterministic evaluation: slot b maps to a fixed sequence.
            ind = len(sequences) // 10 * 8 + b
            offsets.append(
                b % (len(sequences[ind]) - input_frames - output_frames))

        sample_batch.num_vertices = max(sample_batch.num_vertices,
                                        sequences[ind][0]['V'].size(0))
        sample_batch.num_faces = max(sample_batch.num_faces,
                                     sequences[ind][0]['F'].size(0))

        indices.append(ind)

    inputs = torch.zeros(args.batch_size, sample_batch.num_vertices,
                         3 * input_frames)
    targets = torch.zeros(args.batch_size, sample_batch.num_vertices,
                          3 * output_frames)
    mask = torch.zeros(args.batch_size, sample_batch.num_vertices, 1)
    faces = torch.zeros(args.batch_size, sample_batch.num_faces, 3).long()
    laplacian = []

    Di = []
    DiA = []

    for b, (ind, offset) in enumerate(zip(indices, offsets)):
        num_vertices = sequences[ind][0]['V'].size(0)
        num_faces = sequences[ind][0]['F'].size(0)

        # Stack the input frames along the channel dimension.
        for i in range(input_frames):
            inputs[b, :num_vertices, 3 * i:3 * (i + 1)] = \
                sequences[ind][i + offset]['V']

        # Targets are the frames that immediately follow the inputs.
        for i in range(output_frames):
            targets[b, :num_vertices, 3 * i:3 * (i + 1)] = \
                sequences[ind][i + offset + input_frames]['V']

        mask[b, :num_vertices] = 1
        faces[b, :num_faces] = sequences[ind][0]['F']

        # Operators come from the last *input* frame.
        last_input = offset + input_frames - 1
        laplacian.append(sequences[ind][last_input]['L'])

        if args.model == "dir":
            Di.append(sequences[ind][last_input]['Di'])
            DiA.append(sequences[ind][last_input]['DiA'])

    laplacian = utils.sparse_cat(laplacian, sample_batch.num_vertices,
                                 sample_batch.num_vertices)

    if args.model == "dir":
        # 4x factors: quaternionic Dirac operator components — TODO confirm.
        Di = utils.sparse_cat(Di, 4 * sample_batch.num_faces,
                              4 * sample_batch.num_vertices)
        DiA = utils.sparse_cat(DiA, 4 * sample_batch.num_vertices,
                               4 * sample_batch.num_faces)
        Di, DiA = Variable(Di), Variable(DiA)
    else:
        Di, DiA = None, None

    out = [Variable(inputs), Variable(targets), Variable(mask),
           Variable(laplacian)]
    if args.cuda:
        out = [v.cuda() for v in out]
        if Di is not None:
            Di, DiA = Di.cuda(), DiA.cuda()
    return out[0], out[1], out[2], out[3], Di, DiA, faces