Example #1
    def get_train_data(self):
        while True:
            # # Full shuffle; only effective if the train/test size is smaller
            # # than the number of inferred programs.
            # ids = np.arange(len(self.expressions))
            # np.random.shuffle(ids)
            # self.expressions = [self.expressions[index] for index in ids]
            # self.labels = self.labels[ids]

            self.correct_programs = []
            ids = np.arange(self.train_size)
            np.random.shuffle(ids)
            for i in range(0, self.train_size, self.batch_size):
                stacks = []
                batch_exp = [
                    self.expressions[index]
                    for index in ids[i:i + self.batch_size]
                ]
                batch_labels = self.labels[ids[i:i + self.batch_size]]
                if self.self_training:
                    batch_images = self.images[ids[i:i + self.batch_size]]

                for index, exp in enumerate(batch_exp):
                    program = self.parser.Parser.parse(exp)
                    # Check the validity of the expressions
                    if validity(program, len(program), len(program) - 1):
                        self.correct_programs.append(index)
                    else:
                        # stack = np.zeros(
                        #     (self.max_len + 1, self.max_len // 2 + 1, self.canvas_shape[0],
                        #      self.canvas_shape[1]))
                        stack = np.zeros((64, 64))
                        stacks.append(stack)
                        continue

                    if not self.self_training:
                        self.parser.sim.generate_stack(program)
                        stack = self.parser.sim.stack_t
                        stack = np.stack(stack, axis=0)
                        # Pad if the program was shorter than max_len, since
                        # CSGNet can only train on fixed-size inputs.
                        stack = np.pad(
                            stack, (((self.max_len + 1) - stack.shape[0], 0),
                                    (0, 0), (0, 0), (0, 0)))
                        # Keep only the final canvas (top of the execution
                        # stack at the last time step).
                        stack = stack[-1, 0, :, :]
                        stacks.append(stack)

                if not self.self_training:
                    stacks = np.stack(stacks, 0).astype(dtype=np.float32)
                else:
                    stacks = batch_images

                # # data needs to be (program_len + 1, dataset_size, stack_length, canvas_height, canvas_width)
                # batch_data = torch.from_numpy(stacks).permute(1, 0, 2, 3, 4)
                batch_data = torch.from_numpy(stacks)
                yield (batch_data, batch_labels)
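A minimal consumption sketch for the generator above; `generator` and `n_batches` are hypothetical placeholder names, not part of the original code:

# Hypothetical usage: `generator` is assumed to be an instance of the
# data-loader class that defines get_train_data(); n_batches is a placeholder.
train_stream = generator.get_train_data()
for _ in range(n_batches):
    batch_data, batch_labels = next(train_stream)
    # batch_data: torch tensor of per-example canvases for the batch
    # batch_labels: the corresponding ground-truth program labels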
Example #2
import numpy as np
from scipy.optimize import minimize

# ParseModelOutput, validity, and Optimize are assumed to be importable from the
# surrounding CSGNet codebase.


def optimize_expression(query_exp: str,
                        target_image: np.ndarray,
                        metric="iou",
                        stack_size=7,
                        steps=15,
                        max_iter=100):
    """
    A helper function for visually guided search. This takes the target image (or test 
    image) and predicted expression from CSGNet and returns the final chamfer distance 
    and optmized program with least chamfer distance possible.
    :param query_exp: program expression 
    :param target_image: numpy array of test image
    :param metric: metric to minimize while running the optimizer, "chamfer"
    :param stack_size: max stack size of the program required
    :param steps: max number of time step present in any program
    :param max_iter: max iteration for which to run the program.
    :return: 
    """
    # a parser to parse the input expressions.
    parser = ParseModelOutput(canvas_shape=[64, 64],
                              stack_size=stack_size,
                              unique_draws=None,
                              steps=steps)

    program = parser.Parser.parse(query_exp)
    if not validity(program, len(program), len(program) - 1):
        # Invalid program: return it unchanged with a fixed penalty of 16.
        return query_exp, 16

    # Collect the integer parameters of every primitive; these are the variables
    # the optimizer will adjust.
    x = []
    for p in program:
        if p["value"] in ["c", "s", "t"]:
            x += [int(t) for t in p["param"]]

    optimizer = Optimize(query_exp,
                         metric=metric,
                         stack_size=stack_size,
                         steps=steps)
    optimizer.get_target_image(target_image)

    if max_iter is None:
        # With max_iter=None, the optimizer runs until the tolerance is reached
        # rather than stopping after a fixed number of iterations.
        res = minimize(optimizer.objective,
                       x,
                       method="Powell",
                       tol=0.0001,
                       options={
                           "disp": False,
                           'return_all': False
                       })
    else:
        # Otherwise, stop once max_iter iterations have been reached.
        res = minimize(optimizer.objective,
                       x,
                       method="Powell",
                       tol=0.0001,
                       options={
                           "disp": False,
                           'return_all': False,
                           "maxiter": max_iter
                       })

    final_value = res.fun
    res = res.x.astype(int)
    # Clip every third parameter (the primitive size) to [8, 32], then keep all
    # parameters within the 64 x 64 canvas bounds.
    for i in range(2, res.shape[0], 3):
        res[i] = np.clip(res[i], 8, 32)
    res = np.clip(res, 8, 56)
    predicted_exp = optimizer.make_expression(res)
    return predicted_exp, final_value
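A minimal usage sketch for optimize_expression; the expression literal and the zero target canvas below are illustrative placeholders, not outputs of CSGNet:

# Hypothetical inputs: `pred_exp` would normally come from CSGNet's decoder and
# `target` from the test set; both values here are placeholders.
pred_exp = "c(32,32,28)c(48,32,12)-"
target = np.zeros((64, 64), dtype=np.float32)

# Refine the predicted program with at most 100 Powell iterations, minimizing
# the chamfer distance to the target canvas.
refined_exp, final_metric = optimize_expression(pred_exp,
                                                target,
                                                metric="chamfer",
                                                max_iter=100)
print(refined_exp, final_metric)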
Example #3
                        image_path +
                        "{}.png".format(batch_idx * config.batch_size + j),
                        transparent=0)
                    plt.close("all")

    print("average chamfer distance: {}".format(
        CDs / (config.test_size // config.batch_size)),
          flush=True)

    if REFINE:
        Target_images = np.concatenate(Target_images, 0)
        tweaked_expressions = []
        scores = 0
        for index, value in enumerate(pred_expressions):
            prog = parser.Parser.parse(value)
            if validity(prog, len(prog), len(prog) - 1):
                optim_expression, score = optimize_expression(
                    value,
                    Target_images[index // beam_width],
                    metric="chamfer",
                    max_iter=None)
                print(value)
                tweaked_expressions.append(optim_expression)
                scores += score
            else:
                # If the predicted program is invalid, keep it unchanged and
                # assign the fixed penalty of 16.
                tweaked_expressions.append(value)
                scores += 16

        print("chamfer scores", scores / len(tweaked_expressions))
        with open(