Example no. 1
 def execute(self):
     input_memory = self.dependencies[0].output_memory
     # Validate the layout before touching shape[2]/shape[3]: NCHW is expected.
     if len(input_memory.shape) != 4:
         ErrorHandler.raise_error(self.id, " input memory needs to be 4-dimensional!")
     in_H = input_memory.shape[2]
     in_W = input_memory.shape[3]
     input_data = input_memory.get_data()
     out_N, out_C, out_H, out_W = self.calc_output_shape(input_memory.shape)
     self.output_memory = MemoryImpl((out_N, out_C, out_H, out_W))
     out_data = self.output_memory.get_original_data()
     for N in range(out_N):
         for C in range(out_C):
             for H in range(out_H):
                 for W in range(out_W):
                     # Top-left corner of the pooling window in the input.
                     inp_idx_h = H * self.stride[0]
                     inp_idx_w = W * self.stride[1]
                     values = []
                     # Gather the window, clipping it at the input borders.
                     for i in range(self.pooling_shape[0]):
                         if inp_idx_h + i < in_H:
                             for j in range(self.pooling_shape[1]):
                                 if inp_idx_w + j < in_W:
                                     values.append(
                                         input_data[N][C][inp_idx_h +
                                                          i][inp_idx_w + j])
                     out_data[N][C][H][W] = self.do_pooling(values)
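For a quick cross-check of the window indexing above, the following standalone NumPy sketch (a hypothetical helper, not part of the project's classes) performs the same MAX pooling over an NCHW array; the output-size rule inside it is an assumption, since the project computes it in calc_output_shape:
 import numpy as np

 def max_pool_nchw(x, pool_hw, stride_hw):
     # Hypothetical standalone helper; the output-size rule below is an
     # assumption (every stride-aligned start index inside the input).
     n, c, in_h, in_w = x.shape
     out_h = (in_h - 1) // stride_hw[0] + 1
     out_w = (in_w - 1) // stride_hw[1] + 1
     out = np.empty((n, c, out_h, out_w), dtype=x.dtype)
     for h in range(out_h):
         for w in range(out_w):
             hs, ws = h * stride_hw[0], w * stride_hw[1]
             # NumPy slicing clips at the border, like the checks in execute().
             window = x[:, :, hs:hs + pool_hw[0], ws:ws + pool_hw[1]]
             out[:, :, h, w] = window.max(axis=(2, 3))
     return out

 # The 4x4 input produces a 2x2 output: [[5., 7.], [13., 15.]].
 print(max_pool_nchw(np.arange(16.0).reshape(1, 1, 4, 4), (2, 2), (2, 2)))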
Example no. 2
 def do_activation(self, value):
     if self.activation_func == ActivationFunctions.RELU:
         return max(0, value)
     elif self.activation_func == ActivationFunctions.NONE:
         return value
     else:
         ErrorHandler.raise_error("[ERROR] Activation function for: " +
                                  self.id + " not implemented.")
Example no. 3
 def make_connections(self):
     for node_id, node in self.nodes_map.items():
         input_ids = node.primitive.inputs
         for inp_id in input_ids:
             if self.nodes_map.get(inp_id) is None:
                 ErrorHandler.raise_error("There is no " + str(inp_id) +
                                          ", which is input to: " +
                                          str(node_id))
             # Link producer and consumer in both directions.
             node.dependencies.append(self.nodes_map[inp_id])
             self.nodes_map[inp_id].users.append(node)
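To illustrate the structure this produces, here is a small hypothetical sketch that uses plain dicts in place of the project's Node objects and builds the same bidirectional dependencies/users links for a three-node chain:
 # Hypothetical stand-in for nodes_map: node id -> ids of its inputs.
 inputs = {"data": [], "conv1": ["data"], "relu1": ["conv1"]}
 dependencies = {node_id: [] for node_id in inputs}
 users = {node_id: [] for node_id in inputs}
 for node_id, input_ids in inputs.items():
     for inp_id in input_ids:
         if inp_id not in inputs:
             raise KeyError("There is no " + inp_id + ", which is input to: " + node_id)
         dependencies[node_id].append(inp_id)  # consumer -> producer edge
         users[inp_id].append(node_id)         # producer -> consumer edge
 print(dependencies)  # {'data': [], 'conv1': ['data'], 'relu1': ['conv1']}
 print(users)         # {'data': ['conv1'], 'conv1': ['relu1'], 'relu1': []}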
Example no. 4
 def execute(self):
     input_memory = self.dependencies[0].output_memory
     if len(input_memory.shape) != 4:
         ErrorHandler.raise_error(self.id, " input memory needs to be 4-dimensional!")
     inp_data = input_memory.get_original_data()
     output_shape = input_memory.get_shape()
     self.output_memory = MemoryImpl(output_shape)
     out_data = self.output_memory.get_original_data()
     for N in range(input_memory.shape[0]):
         # Normalize over the whole sample (all C, H, W elements).
         sum_batch = np.sum(np.exp(inp_data[N]))
         for C in range(input_memory.shape[1]):
             for H in range(input_memory.shape[2]):
                 for W in range(input_memory.shape[3]):
                     out_data[N][C][H][W] = np.exp(inp_data[N][C][H][W]) / sum_batch
                     if self.do_log:
                         out_data[N][C][H][W] = np.log(out_data[N][C][H][W])
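One caveat: np.exp overflows for large activations, so the division above can produce inf or nan. A common remedy, not applied in the code above, is to subtract the per-sample maximum before exponentiating, which leaves the result unchanged; a minimal standalone sketch:
 import numpy as np

 def stable_softmax(sample):
     # softmax(x) == softmax(x - c) for any constant c; using c = max(x)
     # keeps every exponent <= 0 and avoids overflow.
     shifted = sample - np.max(sample)
     exps = np.exp(shifted)
     return exps / np.sum(exps)

 x = np.array([1000.0, 1001.0, 1002.0])  # naive np.exp(x) would overflow here
 print(stable_softmax(x))                # approx. [0.090, 0.245, 0.665]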
Example no. 5
 def calc_output_shape(self):
     batch = self.input(0).output_memory.shape[0]
     in_wh = self.input(0).output_memory.shape[2]
     out_ch = self.input(1).output_memory.shape[0]
     kernel_size = self.input(1).output_memory.shape[2]
     if self.input(0).output_memory.shape[2] != self.input(
             0).output_memory.shape[3]:
         ErrorHandler.raise_error(
             "Currently non-square input is not supported.")
     if self.input(1).output_memory.shape[2] != self.input(
             1).output_memory.shape[3]:
         ErrorHandler.raise_error(
             "Currently non-square kernels are not supported.")
     if self.input(0).output_memory.shape[1] != self.input(
             1).output_memory.shape[1]:
         ErrorHandler.raise_error(
             "Different number of features for input and weights!")
     # Output size for a convolution with no padding and stride 1.
     hw_out = (in_wh - kernel_size) + 1
     return batch, out_ch, hw_out, hw_out
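A quick worked example of the size formula above, i.e. a valid convolution with no padding and stride 1; the numbers 28 and 5 are purely illustrative:
 in_wh, kernel_size = 28, 5   # e.g. a 28x28 input and a 5x5 kernel
 hw_out = (in_wh - kernel_size) + 1
 print(hw_out)                # 24 -> the output is 24x24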
Example no. 6
 def add_node(self, node):
     ErrorHandler.is_type_generic(node, Node)
     if node.id in self.nodes_map:
         ErrorHandler.raise_error(node.id + " is already in nodes_map!!!")
     self.nodes_map[node.id] = node
Example no. 7
 def execute(self):
     ErrorHandler.raise_error("[ERROR] Execute not implemented for: " +
                              self.id + "!!!")
Example no. 8
 def input(self, idx=0):
     if idx > len(self.dependencies) - 1:
         ErrorHandler.raise_error("No input with index: ", idx)
     return self.dependencies[idx]
Example no. 9
 def do_pooling(self, values):
     if self.pooling_type == PoolingType.MAX:
         return max(values)
     else:
         ErrorHandler.raise_error("[ERROR] Pooling type for: " + self.id +
                                  " not implemented.")