Example #1
 def execute(self):
     input_memory = self.dependencies[0].output_memory
     # Validate the rank before indexing into the shape.
     if len(input_memory.shape) != 4:
         ErrorHandler.raise_error(self.id, " input memory needs to be 4-dimensional!")
     in_H = input_memory.shape[2]
     in_W = input_memory.shape[3]
     input_data = input_memory.get_data()
     out_N, out_C, out_H, out_W = self.calc_output_shape(input_memory.shape)
     self.output_memory = MemoryImpl((out_N, out_C, out_H, out_W))
     out_data = self.output_memory.get_original_data()
     for N in range(out_N):
         for C in range(out_C):
             for H in range(out_H):
                 for W in range(out_W):
                     # Top-left corner of the pooling window in the input.
                     inp_idx_h = H * self.stride[0]
                     inp_idx_w = W * self.stride[1]
                     # Gather the window, clipping it at the input borders
                     # instead of padding.
                     values = []
                     for i in range(self.pooling_shape[0]):
                         if inp_idx_h + i < in_H:
                             for j in range(self.pooling_shape[1]):
                                 if inp_idx_w + j < in_W:
                                     values.append(
                                         input_data[N][C][inp_idx_h + i][inp_idx_w + j])
                     out_data[N][C][H][W] = self.do_pooling(values)
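For cross-checking, a minimal plain-NumPy max-pooling sketch (my own reference, not repo code; it assumes PoolingType.MAX, NCHW layout, and the output shape from calc_output_shape, so every window fits inside the input):

 import numpy as np

 def max_pool_reference(x, pool_hw, stride_hw):
     # Reference NCHW max pooling, no padding.
     n, c, h, w = x.shape
     out_h = (h - pool_hw[0]) // stride_hw[0] + 1
     out_w = (w - pool_hw[1]) // stride_hw[1] + 1
     out = np.empty((n, c, out_h, out_w))
     for i in range(out_h):
         for j in range(out_w):
             hs = i * stride_hw[0]
             ws = j * stride_hw[1]
             out[:, :, i, j] = x[:, :, hs:hs + pool_hw[0],
                                 ws:ws + pool_hw[1]].max(axis=(2, 3))
     return out

 x = np.arange(16, dtype=float).reshape(1, 1, 4, 4)
 print(max_pool_reference(x, (2, 2), (2, 2)))  # [[[[ 5.  7.] [13. 15.]]]]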
Example #2
 def __init__(self, id, input, pooling_shape, pooling_type, stride=None):
     ErrorHandler.is_type_generic(pooling_shape, tuple)
     ErrorHandler.is_type_generic(pooling_type, PoolingType)
     super().__init__(id, [input])
     self.pooling_shape = pooling_shape
     self.pooling_type = pooling_type
     # Default the stride to the window size so execute() and
     # calc_output_shape() never index into a None stride.
     self.stride = stride if stride is not None else pooling_shape
Example #3
 def do_activation(self, value):
     if self.activation_func == ActivationFunctions.RELU:
         return max(0, value)
     elif self.activation_func == ActivationFunctions.NONE:
         return value
     else:
         ErrorHandler.raise_error("[ERROR] Activation function for: " +
                                  self.id + " not implemented.")
Example #4
 def fill_data(self, new_data):
     copied_data = new_data
     if isinstance(copied_data, list):
         copied_data = np.asarray(new_data, dtype=float)
     else:
         ErrorHandler.is_type_generic(copied_data, np.ndarray)
     # Validate the shape for both list and ndarray input.
     ErrorHandler.is_equal(self.shape, copied_data.shape)
     self.__data = copied_data
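A short usage sketch (the MemoryImpl constructor is taken from Example #20; the concrete values are illustrative):

 mem = MemoryImpl((1, 2))
 mem.fill_data([[0.5, -1.0]])       # list input is converted to a float ndarray
 mem.fill_data(np.zeros((1, 2)))    # ndarray input must already match the shape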
Example #5
 def calc_output_shape(self, input_shape):
     ErrorHandler.is_type_generic(input_shape, tuple)
     nout = input_shape[0]
     cout = input_shape[1]
     # Standard pooling formula: out = (in - window) // stride + 1.
     hout = (input_shape[2] - self.pooling_shape[0]) // self.stride[0] + 1
     wout = (input_shape[3] - self.pooling_shape[1]) // self.stride[1] + 1
     return (nout, cout, hout, wout)
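A quick numeric check of the formula (values chosen for illustration):

 # pooling_shape = (2, 2), stride = (2, 2), input shape (1, 3, 6, 6):
 #   hout = (6 - 2) // 2 + 1 = 3
 #   wout = (6 - 2) // 2 + 1 = 3
 # so calc_output_shape((1, 3, 6, 6)) returns (1, 3, 3, 3)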
Example #6
 def make_connections(self):
     for node_id, node in self.nodes_map.items():
         # Wire every node to its producers (dependencies) and register
         # it back as a user of each producer. Iterating an empty inputs
         # list is a no-op, so no explicit length check is needed.
         for inp_id in node.primitive.inputs:
             if self.nodes_map.get(inp_id) is None:
                 ErrorHandler.raise_error("There is no " + str(inp_id) +
                                          ", which is input to: " +
                                          str(node_id))
             node.dependencies.append(self.nodes_map[inp_id])
             self.nodes_map[inp_id].users.append(node)
Example #7
 def __init__(self, primitives, dump_graph):
     ErrorHandler.is_list(primitives)
     self.graph = graph.Graph()
     # [1] Create nodes.
     self.__create_nodes(primitives)
     # [2] Connect nodes.
     self.graph.make_connections()
     # [3] Mark inputs and outputs nodes
     self.graph.set_inputs_and_outputs()
     # [4] Calc DFS (execution order)
     self.graph.calc_dfs()
     if dump_graph:
         self.__dump_graph()
Example #8
 def execute(self):
     input_memory = self.dependencies[0].output_memory
     if len(input_memory.shape) != 4:
         ErrorHandler.raise_error(self.id, " input memory needs to be 4-dimensional!")
     inp_data = input_memory.get_original_data()
     output_shape = input_memory.get_shape()
     self.output_memory = MemoryImpl(output_shape)
     out_data = self.output_memory.get_original_data()
     for N in range(input_memory.shape[0]):
         # Normalize over all values (C, H, W) of one batch sample.
         sum_batch = np.sum(np.exp(inp_data[N]))
         for C in range(input_memory.shape[1]):
             for H in range(input_memory.shape[2]):
                 for W in range(input_memory.shape[3]):
                     out_data[N][C][H][W] = np.exp(inp_data[N][C][H][W]) / sum_batch
                     if self.do_log:
                         out_data[N][C][H][W] = np.log(out_data[N][C][H][W])
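The loops restate softmax (or log-softmax when do_log is set) per batch sample; a vectorized NumPy sketch of the same math, useful as a cross-check (my own reference, not repo code):

 import numpy as np

 def softmax_reference(x, do_log=False):
     # Softmax over each NCHW sample, matching the per-sample sum above.
     e = np.exp(x)
     out = e / e.sum(axis=(1, 2, 3), keepdims=True)
     return np.log(out) if do_log else out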
Example #9
 def calc_output_shape(self):
     in_shape = self.input(0).output_memory.shape
     w_shape = self.input(1).output_memory.shape
     batch = in_shape[0]
     in_wh = in_shape[2]
     out_ch = w_shape[0]
     kernel_size = w_shape[2]
     if in_shape[2] != in_shape[3]:
         ErrorHandler.raise_error(
             "Currently asymmetric input size is not supported.")
     if w_shape[2] != w_shape[3]:
         ErrorHandler.raise_error(
             "Currently asymmetric kernel size is not supported.")
     if in_shape[1] != w_shape[1]:
         ErrorHandler.raise_error(
             "Different number of features for input and weights!")
     # Valid convolution: stride 1, no padding.
     hw_out = (in_wh - kernel_size) + 1
     return batch, out_ch, hw_out, hw_out
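Worked example of the shape rule:

 # in_wh = 32, kernel_size = 5:
 #   hw_out = (32 - 5) + 1 = 28
 # so input (1, 3, 32, 32) with weights (16, 3, 5, 5) gives (1, 16, 28, 28)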
Example #10
 def __init__(self, id, input, new_shape):
     ErrorHandler.is_type_generic(new_shape, tuple)
     super().__init__(id, [input])
     self.new_shape = new_shape
Example #11
 def __init__(self, id, memory):
     ErrorHandler.is_type_generic(memory, Memory)
     super().__init__(id, [])
     self.memory = memory
Example #12
 def __init__(self, id, input, weights_id, bias_id):
     ErrorHandler.is_string(weights_id)
     ErrorHandler.is_string(bias_id)
     super().__init__(id, [input, weights_id, bias_id])
Example #13
 def __init__(self, id, inputs):
     ErrorHandler.is_string(id)
     ErrorHandler.is_list(inputs)
     self.id = id
     self.inputs = inputs
Example #14
 def execute(self):
     ErrorHandler.raise_error("[ERROR] Execute not implemented for: " +
                              self.id + "!!!")
Example #15
 def input(self, idx=0):
     if idx > len(self.dependencies) - 1:
         ErrorHandler.raise_error("No input with index: " + str(idx))
     return self.dependencies[idx]
Example #16
 def do_pooling(self, values):
     if self.pooling_type == PoolingType.MAX:
         return max(values)
     else:
         ErrorHandler.raise_error("[ERROR] Pooling type for: " + self.id +
                                  " not implemented.")
Example #17
 def __init__(self, id, input, activation_function):
     ErrorHandler.is_type_generic(activation_function, ActivationFunctions)
     super().__init__(id, [input])
     # Store under the attribute name that do_activation() reads.
     self.activation_func = activation_function
Example #18
 def add_node(self, node):
     ErrorHandler.is_type_generic(node, Node)
     if node.id in self.nodes_map:
         ErrorHandler.raise_error(node.id + " is already in nodes_map!!!")
     self.nodes_map[node.id] = node
Example #19
 def __init__(self, id, inputs, axis=0):
     ErrorHandler.is_list(inputs)
     ErrorHandler.is_type_generic(axis, int)
     super().__init__(id, inputs)
     self.axis = axis
Example #20
 def __init__(self, shape):
     ErrorHandler.is_type_generic(shape, tuple)
     self.shape = shape
     # np.empty() leaves the buffer uninitialized; call fill_data() before use.
     self.__data = np.empty(shape)