Example 1
    def __init__(self, file_path=None, mode=None):
        self.eye = glm.vec3(0, 0, 0)
        self.up = glm.vec3(0, 1, 0)
        self.center = glm.vec3(32, 32, 32)
        self.current_index = 0
        self.is_target = True
        if file_path is None:
            self.file_path = 'trained_models/results/given-model.pth'
        else:
            self.file_path = file_path

        if mode == 'test':
            m_path = ['/pred.txt', '/target.txt']
        elif mode == 'beam' or mode is None:
            m_path = ['/beam_10_pred.txt', '/beam_10_target.txt']
        else:
            m_path = ['/beam_10_pred-M.txt', '/beam_10_target-M.txt']

        with open(self.file_path + m_path[0]) as data_file:
            self.expressions = data_file.readlines()
        with open(self.file_path + m_path[1]) as target_data_file:
            self.target_expressions = target_data_file.readlines()

        self.primitives = dd.io.load("data/primitives.h5")
        self.img_points = []
        self.target_img_points = []

        self.img_planes = []
        self.target_img_planes = []

        self.target_str = ""
        self.pred_str = ""

        self.pixmap = None
        self.target_pixmap = None
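
A minimal usage sketch for this constructor follows. The class name `ResultStorage` is a placeholder (the snippet does not show the real name); the only assumption taken from the code is that the result directory contains the prediction/target files selected by `mode`.

# Hypothetical usage sketch: ResultStorage stands in for the class that owns
# this __init__; the actual class name is not shown in the snippet.
# mode='test' loads pred.txt/target.txt, mode='beam' (or None) loads the
# beam-search files beam_10_pred.txt/beam_10_target.txt.
storage = ResultStorage(file_path='trained_models/results/given-model.pth',
                        mode='beam')
print(len(storage.expressions), len(storage.target_expressions))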
Example 2
def find_points(a):
    l = []
    for i in range(64):
        for j in range(64):
            for k in range(64):
                if a[i, j, k]:
                    l.append(glm.vec3(i, j, k))

    return l
Example 3
def find_points_simple(a):
    l = []

    indexl = np.argwhere(a)

    for i in indexl:
        l.append(glm.vec3(i[0], i[1], i[2]))

    return l
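
A small consistency check, sketched under the assumption (hard-coded in `find_points`) that the input is a 64x64x64 boolean voxel grid; both functions should report the same occupied voxels, with `find_points_simple` avoiding the triple Python loop.

import numpy as np

# Two occupied voxels in an otherwise empty 64^3 grid.
a = np.zeros((64, 64, 64), dtype=bool)
a[10, 20, 30] = True
a[0, 0, 63] = True

pts_loop = find_points(a)          # nested-loop version
pts_vec = find_points_simple(a)    # np.argwhere version
assert len(pts_loop) == len(pts_vec) == 2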
Example 4
def border_find_points(a):
    l = []
    for i in range(64):
        for j in range(64):
            for k in range(64):
                if a[i, j, k]:
                    # remove inner points, keep only surface points (cubes)
                    if (i > 0 and a[i-1, j, k]) and (i < 63 and a[i+1, j, k]) \
                        and (j > 0 and a[i, j-1, k]) and (j < 63 and a[i, j+1, k]) \
                        and (k > 0 and a[i, j, k-1]) and (k < 63 and a[i, j, k+1]):
                        continue
                    else:
                        l.append(glm.vec3(i, j, k))

    return l
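
For reference, a vectorized sketch of the same surface test (an alternative written here, not part of the snippet): a filled voxel counts as inner only when all six face neighbours are filled, and padding with False makes grid-boundary voxels count as surface, matching the loop above.

import numpy as np

def border_find_points_np(a):
    # Pad with False so boundary voxels never qualify as "inner".
    padded = np.pad(a, 1, mode='constant', constant_values=False)
    inner = (padded[:-2, 1:-1, 1:-1] & padded[2:, 1:-1, 1:-1] &
             padded[1:-1, :-2, 1:-1] & padded[1:-1, 2:, 1:-1] &
             padded[1:-1, 1:-1, :-2] & padded[1:-1, 1:-1, 2:])
    surface = a & ~inner
    # glm here is the project's local module, as imported in the other examples.
    return [glm.vec3(i, j, k) for i, j, k in np.argwhere(surface)]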
Example 5
def axis_view_matrix(axis: glm.vec3):
    zaxis = axis.normalize()

    yaxis = glm.vec3(1, 1, (-zaxis.x - zaxis.y) / zaxis.z)
    yaxis = yaxis.normalize()

    xaxis = yaxis.cross(zaxis)
    xaxis = xaxis.normalize()

    # return glm.mat3(xaxis.x,xaxis.y,xaxis.z,
    #     yaxis.x,yaxis.y,yaxis.z,
    #     zaxis.x,zaxis.y,zaxis.z)

    return np.array([[xaxis.x, xaxis.y, xaxis.z], [yaxis.x, yaxis.y, yaxis.z],
                     [zaxis.x, zaxis.y, zaxis.z]],
                    dtype=float)
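
The returned matrix stacks the three basis vectors row-wise, so multiplying a point by it re-expresses the point in a frame whose z direction is the chosen view axis. A quick sanity-check sketch, assuming the project's local glm module is available as in the other examples:

import numpy as np

M = axis_view_matrix(glm.vec3(1, 1, 1))
v = np.array([1.0, 1.0, 1.0]) / np.sqrt(3.0)   # unit vector along the view axis
print(np.round(M @ v, 6))                      # expected: roughly [0. 0. 1.]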
Example 6
#encoding=utf8
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *

import numpy as np
import time

from src.display.glm import glm
from src.display import transform
from src.projection.find_points import *

import random
import sys

eye = glm.vec3(0, 0, 0)
up = glm.vec3(0, 1, 0)
center = glm.vec3(32, 32, 32)

current_index = 0
is_target = False

file_path = 'trained_models/results/given-model.pth'

if len(sys.argv) > 1:
    file_path = sys.argv[1]
with open(file_path + '/beam_10_pred.txt') as data_file:
    expressions = data_file.readlines()
with open(file_path + '/beam_10_target.txt') as target_data_file:
    target_expressions = target_data_file.readlines()

import deepdish as dd
Example 7
        #Predicted_expressions += expressions
        target_expressions = parser.labels2exps(labels, k)
        Target_expressions += target_expressions

        target_stacks = parser.expression2stack(target_expressions)

        target_voxels = target_stacks[-1,:,0,:,:,:].astype(dtype=bool)
        target_voxels_new = np.repeat(target_voxels, axis=0,
                                      repeats=beam_width)
        predicted_stack = stack_from_expressions(parser, expressions)

        beam_R = np.sum(np.logical_and(target_voxels_new, predicted_stack), (1, 2, 3)) / \
                 (np.sum(np.logical_or(target_voxels_new, predicted_stack), (1, 2, 3)) + 1)
        axis = glm.vec3(1, 1, 1)
        transfer_matrix = axis_view_matrix(axis=axis)
        center = np.dot(transfer_matrix, np.array([32, 32, 32], dtype=float))

        # choose an output whose projection is the most similar to the input 2D image
        predicted_images = np.zeros([beam_width * config.batch_size, 128, 128], dtype=float)
        for index, voxel in enumerate(predicted_stack):
            point_list = axis_view_place_points(voxel, transfer_matrix=transfer_matrix)

            img = z_parrallel_projection_point_simple(point_list, origin_w=128, origin_h=128,
                                                      origin_z=128, w=128, h=128,
                                                      center_x=center[0], center_y=center[1])

            predicted_images[index, :, :] = img

        target_images = data_[-1, :, 0, :, :]
        target_images_new = np.repeat(target_images, axis=0, repeats=beam_width)
        beam_choose_R = np.sum(np.abs(predicted_images - target_images_new), (1, 2))
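
The snippet stops at the per-candidate L1 distances. A sketch of the selection step it leads up to, assuming (as the `np.repeat` calls imply) that the `beam_width` candidates of each batch element occupy consecutive rows:

        # Assumed continuation, not shown in the snippet above.
        scores = beam_choose_R.reshape(config.batch_size, beam_width)
        best_beam = np.argmin(scores, axis=1)                     # smallest L1 distance wins
        chosen_rows = best_beam + np.arange(config.batch_size) * beam_width
        best_expressions = [expressions[i] for i in chosen_rows]  # assumes matching ordering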
Example 8
    def get_test_data(self, batch_size: int, program_len: int,
                      if_randomize=False, final_canvas=False,
                      num_train_images=None, num_test_images=None,
                      if_primitives=False, if_jitter=False):
        """
        Test dataset creation. It is assumed that the first num_training
        examples in the dataset corresponds to training and later num_test
        are validation dataset. The validation may optionally be shuffled
        randomly but usually not required.
        :param num_train_images:
        :param if_primitives: if pre-rendered primitives are given
        :param if_jitter: Whether to jitter the voxel grids
        :param num_test_images: Number of test images
        :param batch_size: batch size of dataset to yielded
        :param program_len: length of program to be generated
        :param if_randomize: if randomize
        :param final_canvas: if true return only the target canvas instead of 
        complete stack to save memory
        :return: 
        """
        # This generates test data of fixed length. Samples are not shuffled
        # by default.
        labels = np.zeros((batch_size, program_len + 1), dtype=np.int64)
        sim = SimulateStack(program_len // 2 + 1, self.canvas_shape,
                            self.unique_draw)
        sim.get_all_primitives(self.primitives)
        parser = Parser()

        axis = glm.vec3(1, 1, 1)
        transfer_matrix = axis_view_matrix(axis=axis)
        center = np.dot(transfer_matrix, np.array([32, 32, 32], dtype=float))

        image_path = 'data/2D/'

        if final_canvas:
            # We will load all the final canvases from the disk.
            path = image_path + str(program_len) + '/'
            Stack = np.zeros((1, num_test_images, 1, 128, 128),
                             dtype=float)
            for i in range(num_train_images,
                           num_test_images + num_train_images):
                p = path + "{}.jpg".format(i)
                img = cv2.imread(p, 0)
                img = np.array(img, dtype=float)
                img = img / 255
                Stack[0, i - num_train_images, 0, :, :] = img

        while True:
            # Indices of the test samples (shuffled below if if_randomize)
            IDS = np.arange(num_train_images, num_train_images +
                            num_test_images)
            if if_randomize:
                np.random.shuffle(IDS)
            for rand_id in range(0, num_test_images - batch_size, batch_size):
                image_ids = IDS[rand_id: rand_id + batch_size]
                if not final_canvas:
                    stacks = []
                    sampled_exps = []
                    for index, value in enumerate(image_ids):
                        sampled_exps.append(self.programs[program_len][value])
                        if not if_primitives:
                            program = parser.parse(
                                self.programs[program_len][value])
                        if True:
                            # if all primitives are given already, parse using
                            # a different parser to get the keys to the dict
                            try:
                                program = self.parse(self.programs[program_len][
                                                         value])
                            except:
                                print(index, self.programs[program_len][
                                    value])

                        sim.generate_stack(program, if_primitives=if_primitives)
                        #stack = sim.stack_t
                        #stack = np.stack(stack, axis=0)
                        #stacks.append(stack)

                        stack = []
                        
                        voxel = sim.stack.get_top()

                        point_list = axis_view_place_points(voxel, transfer_matrix=transfer_matrix)

                        # projection
                        img = z_parrallel_projection_point_simple(point_list, origin_w=128, origin_h=128,
                                                                  origin_z=128, w=128, h=128,
                                                                  center_x=center[0], center_y=center[1])

                        stack.append([img])
                        
                        stack = np.stack(stack, axis=0)

                        stacks.append(stack)

                    stacks = np.stack(stacks, 1).astype(dtype=np.float32)
                else:
                    # When only target image is required
                    fetch_ids = image_ids - num_train_images
                    stacks = Stack[0:1, fetch_ids, 0:1, :, :].astype(
                        dtype=np.float32)
                for index, value in enumerate(image_ids):
                    # Get the current program
                    exp = self.programs[program_len][value]
                    program = self.parse(exp)
                    for j in range(program_len):
                        try:
                            labels[index, j] = self.unique_draw.index(
                                program[j]["value"])
                        except:
                            print(program)

                    labels[:, -1] = len(self.unique_draw) - 1

                # if if_jitter:
                #     temp = stacks[-1, :, 0, :, :, :]
                #     stacks[-1, :, 0, :, :, :] = np.roll(temp, (np.random.randint(-3, 4),
                #                                                np.random.randint(-3, 4),
                #                                                np.random.randint(-3, 4)),
                #                                         axis=(1, 2, 3))
                yield [stacks, labels]
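
A hedged usage sketch for this generator. `dataset_gen` is a placeholder for whatever object these methods belong to (its class is not shown), and the argument values are only illustrative; the shapes in the comments follow from the code above.

# Hypothetical usage: dataset_gen stands for the (unshown) dataset object.
test_gen = dataset_gen.get_test_data(batch_size=32, program_len=7,
                                     num_train_images=400, num_test_images=100,
                                     if_primitives=True)
stacks, labels = next(test_gen)
# stacks: (1, 32, 1, 128, 128) float32 projected images
# labels: (32, 8) int64 indices into unique_draw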
Example 9
    def get_train_data(self, batch_size: int, program_len: int,
                       final_canvas=False, if_randomize=True, if_primitives=False,
                       num_train_images=400, if_jitter=False):
        """
        This is a special generator that can generate dataset for any length.
        This essentially corresponds to the "variable len program"
        experiment. Here, generate a dataset for training for fixed length.
        Since, this is a generator, you need to make a generator object for
        all different kind of lengths and use them as required. It is made
        sure that samples are shuffled only once in an epoch and all the
        samples are different in an epoch.
        :param if_randomize: whether to randomize the training instance during training.
        :param if_primitives: if pre-rendered primitives are given
        :param num_train_images: Number of training instances
        :param if_jitter: whether to jitter the voxels or not
        :param batch_size: batch size for the current program
        :param program_len: which program length dataset to sample
        :param final_canvas: This is special mode of data generation where
        all the dataset is loaded in one go and iteratively yielded. The
        dataset for only target images is created.
        """
        # The last label corresponds to the stop symbol and the first one to
        # start symbol.
        labels = np.zeros((batch_size, program_len + 1), dtype=np.int64)
        sim = SimulateStack(program_len // 2 + 1, self.canvas_shape,
                            self.unique_draw)
        sim.get_all_primitives(self.primitives)
        parser = Parser()

        axis = glm.vec3(1, 1, 1)
        transfer_matrix = axis_view_matrix(axis=axis)
        center = np.dot(transfer_matrix, np.array([32, 32, 32], dtype=float))

        image_path = 'data/2D/'

        #if final_canvas:
        #    # We will load all the final canvases from the disk.
        #    path = image_path + str(program_len) + '/'
        #    Stack = np.zeros((1, num_train_images, 1, 128, 128),
        #                     dtype=float)
        #    for i in range(num_train_images):
        #        p = path + "{}.jpg".format(i)
        #        img = cv2.imread(p,0)
        #        img = np.array(img,dtype = float)
        #        img = img / 255
        #        Stack[0, i, 0, :, :] = img

        while True:
            # Indices of the training samples (shuffled below if if_randomize)
            IDS = np.arange(num_train_images)
            if if_randomize:
                np.random.shuffle(IDS)
            for rand_id in range(0, num_train_images - batch_size,
                                 batch_size):
                image_ids = IDS[rand_id:rand_id + batch_size]
                if not final_canvas:
                    stacks = []
                    sampled_exps = []
                    for index, value in enumerate(image_ids):
                        sampled_exps.append(self.programs[program_len][value])
                        if not if_primitives:
                            program = parser.parse(
                                self.programs[program_len][value])
                        else:
                            # if all primitives are given already, parse using
                            # a different parser to get the keys to the dict
                            program = self.parse(self.programs[program_len][
                                                         value])

                        
                        sim.generate_stack(program, if_primitives=if_primitives)
                        #stack = sim.stack_t
                        #stack = np.stack(stack, axis=0)
                        #stacks.append(stack)

                        stack = []

                        voxel = sim.stack.get_top()

                        point_list = axis_view_place_points(voxel, transfer_matrix=transfer_matrix)

                        # projection
                        img = z_parrallel_projection_point_simple(point_list, origin_w=128, origin_h=128,
                                                                  origin_z=128, w=128, h=128,
                                                                  center_x=center[0], center_y=center[1])

                        stack.append([img])
                        
                        stack = np.stack(stack, axis=0)

                        stacks.append(stack)
                        
                    stacks = np.stack(stacks, 1).astype(dtype=np.float32)
                else:
                    # Only target image is required
                    stacks = np.zeros((1, batch_size, 1, 128, 128),
                                      dtype=np.float32)
                    path = image_path + str(program_len) + '/'
                    for i, image_id in enumerate(image_ids):
                        p = path + "{}.jpg".format(image_id)
                        img = cv2.imread(p, 0)
                        img = np.array(img, dtype=float)
                        img = img / 255
                        stacks[0, i, 0, :, :] = img

                    # stacks = Stack[0:1, image_ids, 0:1, :, :].astype(
                    #    dtype=np.float32)
                for index, value in enumerate(image_ids):
                    # Get the current program
                    exp = self.programs[program_len][value]
                    program = self.parse(exp)
                    for j in range(program_len):
                        labels[index, j] = self.unique_draw.index(
                            program[j]["value"])

                    labels[:, -1] = len(self.unique_draw) - 1

                # if if_jitter:
                #     temp = stacks[-1, :, 0, :, :, :]
                #     stacks[-1, :, 0, :, :, :] = np.roll(temp, (np.random.randint(-3, 4),
                #                                                np.random.randint(-3, 4),
                #                                                np.random.randint(-3, 4)),
                #                                         axis=(1, 2, 3))

                yield [stacks, labels]
Example 10
    def init_img(self):
        print('loading models...')

        expression = self.storage.expressions[self.storage.current_index]
        target_expression = self.storage.target_expressions[
            self.storage.current_index]

        voxel = voxels_from_expressions([expression, target_expression],
                                        self.storage.primitives,
                                        max_len=7)

        self.storage.img_planes = border_find_planes(voxel[0])
        self.storage.target_img_planes = border_find_planes(voxel[1])

        # build infix strings for the predicted and target programs

        pred_program = parse(expression)
        target_program = parse(target_expression)

        stack = []
        for sentence in pred_program:
            if sentence['type'] == 'op':
                a = stack.pop()
                b = stack.pop()
                c = '( ' + b + ' ' + sentence['value'] + ' ' + a + ' )'
                stack.append(c)
            else:
                stack.append(sentence['value'])

        self.storage.pred_str = stack.pop()
        count = len(self.storage.pred_str)
        if count >= 50:
            self.storage.pred_str = self.storage.pred_str[
                0:50] + '\n' + self.storage.pred_str[50:]

        stack = []
        for sentence in target_program:
            if sentence['type'] == 'op':
                a = stack.pop()
                b = stack.pop()
                c = '( ' + b + ' ' + sentence['value'] + ' ' + a + ' )'
                stack.append(c)
            else:
                stack.append(sentence['value'])

        self.storage.target_str = stack.pop()
        count = len(self.storage.target_str)
        if count >= 50:
            self.storage.target_str = self.storage.target_str[
                0:50] + '\n' + self.storage.target_str[50:]

        # load projection
        axis = glm.vec3(1, 1, 1)
        transfer_matrix = axis_view_matrix(axis=axis)
        center = np.dot(transfer_matrix, np.array([32, 32, 32], dtype=float))

        #pred
        point_list = axis_view_place_points(voxel[0],
                                            transfer_matrix=transfer_matrix)

        img = z_parrallel_projection_point_simple(point_list,
                                                  origin_w=128,
                                                  origin_h=128,
                                                  origin_z=128,
                                                  w=128,
                                                  h=128,
                                                  center_x=center[0],
                                                  center_y=center[1])

        img_mask = img * 255
        self.storage.pixmap = np.array(img_mask, dtype=int)
        self.storage.pixmap.resize(
            (self.storage.pixmap.shape[0], self.storage.pixmap.shape[1], 1))

        #target
        point_list = axis_view_place_points(voxel[1],
                                            transfer_matrix=transfer_matrix)

        img = z_parrallel_projection_point_simple(point_list,
                                                  origin_w=128,
                                                  origin_h=128,
                                                  origin_z=128,
                                                  w=128,
                                                  h=128,
                                                  center_x=center[0],
                                                  center_y=center[1])

        img_mask = img * 255
        self.storage.target_pixmap = np.array(img_mask, dtype=int)
        self.storage.target_pixmap.resize(
            (self.storage.target_pixmap.shape[0],
             self.storage.target_pixmap.shape[1], 1))

        print('loading finished!')
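
The `pixmap` arrays built above are plain integer images; displaying them in a GUI still requires a conversion step. A sketch assuming a PyQt5 front end (the attribute name suggests Qt, but the snippet does not show any widget code):

# Assumption: PyQt5 is the GUI toolkit; this helper is not part of the snippet.
import numpy as np
from PyQt5.QtGui import QImage, QPixmap

def to_qpixmap(mask):
    # mask: (H, W, 1) integer array with values in 0..255, as built in init_img.
    gray = np.ascontiguousarray(mask[:, :, 0].astype(np.uint8))
    qimage = QImage(gray.data, gray.shape[1], gray.shape[0],
                    gray.strides[0], QImage.Format_Grayscale8).copy()  # copy() detaches from the numpy buffer
    return QPixmap.fromImage(qimage)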