Example #1
        # Flatten the per-image beam results into a single
        # (batch_size * beam_width, max_len) array.
        beam_labels_numpy = np.zeros((config.batch_size * beam_width, max_len),
                                     dtype=np.int32)

        for i in range(data_.shape[1]):
            beam_labels_numpy[i * beam_width: (i + 1) * beam_width, :] = beam_labels[i]

        # find expression from these predicted beam labels
        expressions = [""] * config.batch_size * beam_width
        for i in range(config.batch_size * beam_width):
            for j in range(max_len):
                expressions[i] += generator.unique_draw[beam_labels_numpy[i, j]]
        for index, prog in enumerate(expressions):
            expressions[index] = prog.split("$")[0]

        # Predicted_expressions += expressions
        target_expressions = parser.labels2exps(labels, k)
        Target_expressions += target_expressions

        target_stacks = parser.expression2stack(target_expressions)

        target_voxels = target_stacks[-1, :, 0, :, :, :].astype(dtype=bool)
        target_voxels_new = np.repeat(target_voxels, axis=0,
                                      repeats=beam_width)
        predicted_stack = stack_from_expressions(parser, expressions)

        # Per-beam IoU between target and predicted voxel grids; the +1 in
        # the denominator guards against division by zero for empty unions.
        beam_R = np.sum(np.logical_and(target_voxels_new, predicted_stack),
                        (1, 2, 3)) / \
                 (np.sum(np.logical_or(target_voxels_new, predicted_stack),
                         (1, 2, 3)) + 1)
        axis = glm.vec3(1, 1, 1)
        transfer_matrix = axis_view_matrix(axis=axis)
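
The snippet decodes integer beam labels into expression strings by indexing `unique_draw` and truncating at the `$` stop symbol, then scores each beam with a voxel IoU whose `+1` denominator guards against empty unions. Below is a self-contained sketch of both steps on toy data; every name in it is illustrative rather than taken from the repository:

import numpy as np

# Toy vocabulary: two primitives, one operator, and the "$" stop symbol.
unique_draw = ["c(32,32,16)", "s(16,16,8)", "+", "$"]
beam_labels = np.array([[0, 1, 2, 3, 3]])  # shape (beams, max_len)

# Decode each beam: concatenate tokens, then cut at the stop symbol.
expressions = ["".join(unique_draw[t] for t in row).split("$")[0]
               for row in beam_labels]
print(expressions)  # ['c(32,32,16)s(16,16,8)+']

def voxel_iou(target, predicted):
    # Boolean arrays of shape (N, D, H, W); one score per item, with the
    # same +1 guard as beam_R above.
    inter = np.sum(np.logical_and(target, predicted), axis=(1, 2, 3))
    union = np.sum(np.logical_or(target, predicted), axis=(1, 2, 3))
    return inter / (union + 1)

target = np.zeros((1, 4, 4, 4), dtype=bool)
predicted = np.zeros((1, 4, 4, 4), dtype=bool)
target[0, :2] = predicted[0, :2] = True  # perfectly overlapping shapes
print(voxel_iou(target, predicted))      # ~[0.97], just under 1.0
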
Example #2
        test_gen_objs[k] = generator.get_test_data(
            test_batch_size,
            k,
            num_train_images=dataset_sizes[k][0],
            num_test_images=dataset_sizes[k][1],
            jitter_program=jit)

    # For each dataset bucket, run the network on every test batch and
    # accumulate the Chamfer distance between target and predicted images.
    for k in dataset_sizes.keys():
        test_batch_size = config.batch_size
        for i in range(dataset_sizes[k][1] // test_batch_size):
            print(k, i, dataset_sizes[k][1] // test_batch_size)
            data_, labels = next(test_gen_objs[k])
            pred_images, pred_prog = evaluator.test(data_, parser, max_len)
            target_images = data_[-1, :, 0, :, :].astype(dtype=bool)
            labels = Variable(torch.from_numpy(labels)).cuda()
            targ_prog = parser.labels2exps(labels, k)

            programs_tar[jit] += targ_prog
            programs_pred[jit] += pred_prog
            distance = chamfer(target_images, pred_images)
            total_CD += np.sum(distance)

    over_all_CD[jit] = total_CD / total_size

metrics["chamfer"] = over_all_CD
print(metrics, model_name)
print(over_all_CD)

results_path = "trained_models/results/{}/".format(model_name)
os.makedirs(os.path.dirname(results_path), exist_ok=True)
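
The `chamfer` helper used above is not shown in this excerpt. A plausible minimal 2D variant based on SciPy's Euclidean distance transform, offered only as a sketch (the repository's implementation may differ):

import numpy as np
from scipy import ndimage

def chamfer(images1, images2):
    # images1, images2: boolean arrays of shape (N, H, W). The symmetric
    # Chamfer distance averages, in both directions, the distance from
    # every "on" pixel in one image to the nearest "on" pixel in the other.
    distances = np.zeros(images1.shape[0])
    for i, (a, b) in enumerate(zip(images1, images2)):
        # distance_transform_edt(~b) gives, for every pixel, the distance
        # to the nearest True pixel of b.
        d_ab = ndimage.distance_transform_edt(~b)[a].mean() if a.any() else 0.0
        d_ba = ndimage.distance_transform_edt(~a)[b].mean() if b.any() else 0.0
        distances[i] = (d_ab + d_ba) / 2.0
    return distances
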
Example #3
import numpy as np
import torch
import torch.nn.functional as F

# ParseModelOutput, validity, and `device` are assumed to be provided by the
# surrounding CSGNet-style codebase, as in the other examples on this page.


class WakeSleepGen:
    def __init__(self,
                 labels_path,
                 batch_size=100,
                 train_size=10000,
                 canvas_shape=[64, 64],
                 max_len=13,
                 self_training=False):

        self.labels = torch.load(labels_path + "labels.pt",
                                 map_location=device)
        if isinstance(self.labels, np.ndarray):
            self.labels = torch.from_numpy(self.labels).to(device)
        self.labels = self.labels.long()

        self.self_training = self_training
        if self_training:
            self.images = torch.load(labels_path + "images.pt")

        # Pad labels with a stop symbol (token index 399). This should be
        # correct, but needs confirmation since infer_programs currently
        # outputs length-13 labels.
        self.labels = F.pad(self.labels, (0, 1), 'constant', 399)

        self.train_size = train_size
        self.max_len = max_len
        self.canvas_shape = canvas_shape
        self.batch_size = batch_size

        with open("terminals.txt", "r") as file:
            self.unique_draw = file.readlines()
        # Strip the trailing newline from each terminal symbol; e[0:-1]
        # would truncate a final line that lacks a newline.
        for index, e in enumerate(self.unique_draw):
            self.unique_draw[index] = e.strip()

        self.parser = ParseModelOutput(self.unique_draw, self.max_len // 2 + 1,
                                       self.max_len, canvas_shape)
        self.expressions = self.parser.labels2exps(self.labels,
                                                   self.labels.shape[1])
        # Remove the stop symbol and later part of the expression
        for index, exp in enumerate(self.expressions):
            self.expressions[index] = exp.split("$")[0]
        self.correct_programs = []

    def get_train_data(self):
        while True:
            # # full shuffle, only effective if train/test size smaller than inferred programs
            # ids = np.arange(len(self.expressions))
            # np.random.shuffle(ids)
            # self.expressions = [self.expressions[index] for index in ids]
            # self.labels = self.labels[ids]

            self.correct_programs = []
            ids = np.arange(self.train_size)
            np.random.shuffle(ids)
            for i in range(0, self.train_size, self.batch_size):
                stacks = []
                batch_exp = [
                    self.expressions[index]
                    for index in ids[i:i + self.batch_size]
                ]
                batch_labels = self.labels[ids[i:i + self.batch_size]]
                if self.self_training:
                    batch_images = self.images[ids[i:i + self.batch_size]]

                for index, exp in enumerate(batch_exp):
                    program = self.parser.Parser.parse(exp)
                    # Check the validity of the expressions
                    if validity(program, len(program), len(program) - 1):
                        self.correct_programs.append(index)
                    else:
                        # Invalid program: append an empty canvas as a
                        # placeholder so the batch keeps a consistent shape.
                        stack = np.zeros(
                            (self.canvas_shape[0], self.canvas_shape[1]))
                        stacks.append(stack)
                        continue

                    if not self.self_training:
                        self.parser.sim.generate_stack(program)
                        stack = self.parser.sim.stack_t
                        stack = np.stack(stack, axis=0)
                        # Pad if the program was shorter than max_len, since
                        # CSGNet can only train on fixed-size inputs.
                        stack = np.pad(
                            stack, (((self.max_len + 1) - stack.shape[0], 0),
                                    (0, 0), (0, 0), (0, 0)))
                        stack = stack[-1, 0, :, :]
                        stacks.append(stack)

                if not self.self_training:
                    stacks = np.stack(stacks, 0).astype(dtype=np.float32)
                else:
                    stacks = batch_images

                # # data needs to be (program_len + 1, dataset_size, stack_length, canvas_height, canvas_width)
                # batch_data = torch.from_numpy(stacks).permute(1, 0, 2, 3, 4)

                # `stacks` may already be a torch tensor when self-training
                # (loaded from images.pt); only convert numpy arrays.
                if isinstance(stacks, np.ndarray):
                    batch_data = torch.from_numpy(stacks)
                else:
                    batch_data = stacks
                yield (batch_data, batch_labels)
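
A minimal usage sketch for the generator above; the directory layout (a labels.pt next to the given path, terminals.txt in the working directory) and the sizes are assumptions for illustration:

gen = WakeSleepGen("wake_sleep_data/generator/",
                   batch_size=100,
                   train_size=10000,
                   max_len=13)
batches = gen.get_train_data()
batch_data, batch_labels = next(batches)
print(batch_data.shape, batch_labels.shape)  # e.g. (100, 64, 64) and (100, 14)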