def get_observe_forward_func(self, observe_nodes):
    observe_nodes = to_list(observe_nodes)
    inputs = self.in_nodes_ + [self.tr_phase_node_]
    timer = Timer()
    f_observe_forward = K.function_no_given(inputs, observe_nodes)
    timer.show("Compiling f_observe_forward time:")
    return f_observe_forward
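# A minimal usage sketch for get_observe_forward_func, assuming a built model
# `md` with a single input node and a hypothetical hidden layer `a2`; the
# `output_` attribute name follows this codebase's trailing-underscore
# convention but is an assumption. The compiled function takes the input data
# plus the train-phase flag (0. selects the test phase).
f_obs_fw = md.get_observe_forward_func(a2.output_)
[hidden_act] = f_obs_fw(x_test, 0.)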
def compile(self, md):
    inputs = md.in_nodes_ + [md.tr_phase_node_]
    self._f_pred = K.function_no_given(inputs, md.out_nodes_)
    self._md_ = md

    # memory usage
    print "Callback", self._md_._show_memory_usage(self._md_.effective_layers_, self._batch_size_)
def _evaluate(self, x, y, eval_type):
    # build one loss node per metric
    loss_nodes = []
    for metric in self._metrics_:
        # default objective given as a string
        if type(metric) is str:
            assert len(self._md_.out_nodes_) == len(self._md_.gt_nodes_), \
                "If you are using default objectives, " \
                + "out_node of out_layers must match ground truth!"
            loss_node = sum([obj.get(metric)(pred_node, gt_node)
                             for pred_node, gt_node
                             in zip(self._md_.out_nodes_, self._md_.gt_nodes_)])
        # user defined objective function
        elif isfunction(metric):
            loss_node = metric(self._md_)
        # otherwise the metric is already a node
        else:
            loss_node = metric
        loss_nodes.append(loss_node)

    # compile the evaluation function only once
    if not hasattr(self, '_f_evaluate'):
        print 'compiling evaluation function ..'
        inputs = self._md_.in_nodes_ + self._md_.gt_nodes_ + [self._md_.tr_phase_node_]
        self._f_evaluate = K.function_no_given(inputs, loss_nodes)
        print 'compile finished. '

    # calculate metric values
    t1 = time.time()
    if self._generator_:
        generator = self._generator_
    else:
        generator = self._DefaultGenerator(self._batch_size_)

    n_all = 0.
    cnt = 0.
    metric_vals = np.zeros(len(self._metrics_))
    batch_num = sum(1 for it in generator.generate(x, y))
    for batch_x, batch_y in generator.generate(x, y):
        batch_x = to_list(batch_x)
        batch_y = to_list(batch_y)
        curr_batch_size = batch_x[0].shape[0]
        n_all += curr_batch_size
        cnt += 1.
        if self._transformer_:
            (batch_x, batch_y) = self._transformer_.transform(batch_x, batch_y)
        batch_x = format_data_list(batch_x)
        batch_y = format_data_list(batch_y)
        in_list = batch_x + batch_y + [0.]   # 0. selects the test phase
        batch_metric_vals = np.array(self._f_evaluate(*in_list))
        metric_vals += batch_metric_vals * curr_batch_size
        if self._verbose_ == 1:
            self._print_progress(batch_num, cnt)
    metric_vals /= n_all

    # timer
    t2 = time.time()

    # print results
    self._print_time_results(eval_type, self._metrics_, metric_vals, t2 - t1)
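# A user-defined metric is any callable taking the model; this hedged sketch
# reuses the framework's own obj.get lookup (as in the code above) to build a
# scaled loss node. It assumes one output node, one ground-truth node, and
# that 'mse' is a registered objective name.
def weighted_mse(md):
    return 2. * obj.get('mse')(md.out_nodes_[0], md.gt_nodes_[0])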
def get_observe_backward_func(self, observe_nodes):
    if self.gt_nodes_ is None:
        raise Exception("You must call the set_gt_nodes method before "
                        "calling get_observe_backward_func!")
    observe_nodes = to_list(observe_nodes)
    inputs = self.in_nodes_ + self.gt_nodes_ + [self.tr_phase_node_]
    timer = Timer()
    f_observe_backward = K.function_no_given(inputs, observe_nodes)
    timer.show("Compiling f_observe_backward time:")
    return f_observe_backward
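# Usage sketch: ground-truth nodes must be set before compiling a backward
# observer. The observed nodes here are parameter gradients built with K.grad,
# exactly as get_optimization_func does below; this assumes a single
# input/target pair and a registered 'categorical_crossentropy' objective.
md.set_gt_nodes(target_dim_list=[2])
loss_node = obj.get('categorical_crossentropy')(md.out_nodes_[0], md.gt_nodes_[0])
f_obs_bw = md.get_observe_backward_func(K.grad(loss_node, md.params_))
grad_vals = f_obs_bw(x_batch, y_batch, 1.)   # 1. = train phase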
def get_optimization_func(self, target_dim_list, loss_func, optimizer, clip):
    """Compile and return the optimization function.

    Args:
      target_dim_list: list of integers. Dimensions of the targets,
        e.g. target_dim_list=[2].
      loss_func: string | function.
      optimizer: object.
      clip: None | real value.

    Returns:
      optimization function.
    """
    # set ground truth nodes
    self.set_gt_nodes(target_dim_list)

    # default loss
    if type(loss_func) is str:
        assert len(self.out_nodes_) == 1, \
            "If the number of out_layers > 1, you need to define your own loss_func!"
        loss_node = obj.get(loss_func)(self.out_nodes_[0], self.gt_nodes_[0])
    # user defined loss
    else:
        loss_node = loss_func(self)

    # compute gradients (regularization is added to the loss)
    gparams = K.grad(loss_node + self.reg_value_, self.params_)

    # clip gradients
    if clip is not None:
        gparams = [K.clip(gparam, -clip, clip) for gparam in gparams]

    # gradient based parameter updates
    param_updates = optimizer.get_updates(self.params_, gparams)

    # gather all updates
    updates = param_updates + self.inner_updates_

    # compile the optimization function
    inputs = self.in_nodes_ + self.gt_nodes_ + [K.common_tr_phase_node]
    outputs = [loss_node]
    f = K.function_no_given(inputs, outputs, updates)
    return f
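# Training-step sketch; the argument values mirror fit's defaults below
# (SGD(lr=0.01, rho=0.9), categorical crossentropy), and the call assumes a
# single input node and a single target node, so the argument list is the
# input batch, the target batch, then the train-phase flag.
f_opt = md.get_optimization_func(target_dim_list=[2],
                                 loss_func='categorical_crossentropy',
                                 optimizer=SGD(lr=0.01, rho=0.9),
                                 clip=None)
loss = f_opt(x_batch, y_batch, 1.)[0]   # 1. selects the train phase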
def predict(self, x, batch_size=128, tr_phase=0.):
    """Predict outputs using the current model.

    Args:
      x: ndarray | list of ndarray.
      batch_size: integer | None. Prediction batch size.
      tr_phase: 0. | 1. Test phase or train phase.

    Returns:
      ndarray | list of ndarray.
    """
    # compile the prediction function just once
    if self._f_predict is None:
        inputs = self.in_nodes_ + [self.tr_phase_node_]
        timer = Timer()
        self._f_predict = K.function_no_given(inputs, self.out_nodes_)
        timer.show("Compiling f_predict time:")
    return self.run_function(self._f_predict, x, batch_size, tr_phase)
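# The first call below pays the one-off compilation cost reported by Timer;
# subsequent calls reuse the cached self._f_predict.
y_pred = md.predict(x_test, batch_size=128, tr_phase=0.)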
def predict(self, x, batch_size=100):
    # format data
    x = to_list(x)
    x = [K.format_data(e) for e in x]

    # compile the prediction function just once
    if not hasattr(self, '_f_predict'):
        self._f_predict = K.function_no_given(self._in_nodes_, self._tr_phase_node_,
                                              self._out_nodes_)

    # batch_size=None: put all data on the GPU at once
    if batch_size is None:
        in_list = x + [0.]
        y_out = self._f_predict(*in_list)
    # otherwise: put one batch on the GPU at a time
    else:
        N = len(x[0])
        batch_num = int(np.ceil(float(N) / batch_size))
        n_out_nodes = len(self._out_nodes_)
        y_out = [[] for e in self._out_nodes_]
        for i1 in xrange(batch_num):
            in_list = [e[i1 * batch_size : min((i1 + 1) * batch_size, N)]
                       for e in x] + [0.]
            batch_y_out = self._f_predict(*in_list)
            for j1 in xrange(n_out_nodes):
                y_out[j1].append(batch_y_out[j1])
        # concatenate the batch results per output node
        y_out = [np.concatenate(e, axis=0) for e in y_out]

    if len(y_out) == 1:
        return y_out[0]
    else:
        return y_out
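# batch_size=None pushes the whole array to the device in one call, which is
# fastest for small inputs but can exhaust GPU memory; the batched path keeps
# a bounded footprint at the cost of several smaller transfers.
y_all = md.predict(x_test, batch_size=None)
y_batched = md.predict(x_test, batch_size=100)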
def fit(self, x, y, batch_size=100, n_epochs=10,
        loss_func='categorical_crossentropy',
        optimizer=SGD(lr=0.01, rho=0.9),
        clip=None, callbacks=[], shuffle=True, verbose=1):
    x = to_list(x)
    y = to_list(y)

    # format data
    x = [K.format_data(e) for e in x]
    y = [K.format_data(e) for e in y]

    # shuffle data
    if shuffle:
        x, y = supports.shuffle(x, y)

    # check data
    self._check_data(y, loss_func)

    # init ground truth nodes
    self._gt_nodes_ = [K.placeholder(e.ndim) for e in y]

    # memory usage
    print "Train", self._show_memory_usage(self._layer_list_, batch_size)

    # default objective
    if type(loss_func) is str:
        assert len(self._out_nodes_) == len(self._gt_nodes_), \
            "If you are using default objectives, " \
            + "out_node of out_layers must match ground truth!"
        loss_node = sum([obj.get(loss_func)(pred_node, gt_node)
                         for pred_node, gt_node
                         in zip(self._out_nodes_, self._gt_nodes_)])
    # user defined objective
    else:
        loss_node = loss_func(self._out_nodes_, self._any_nodes_, self._gt_nodes_)

    # compute gradients (regularization is added to the loss)
    gparams = K.grad(loss_node + self._reg_value_, self._params_)

    # clip gradients
    if clip is not None:
        gparams = [K.clip(gparam, -clip, clip) for gparam in gparams]

    # gradient based parameter updates
    param_updates = optimizer.get_updates(self._params_, gparams)

    # gather all updates
    updates = param_updates + self._inner_updates_

    # compile callbacks (guard against callbacks=None, which would crash
    # the callback loops below)
    if callbacks is None:
        callbacks = []
    callbacks = to_list(callbacks)
    for callback in callbacks:
        callback.compile(self)

    # compile the training function
    input_nodes = self._in_nodes_ + self._gt_nodes_
    output_nodes = [loss_node]
    f = K.function_no_given(input_nodes, self._tr_phase_node_,
                            output_nodes, updates)

    # train
    N = len(x[0])
    batch_num = int(np.ceil(float(N) / batch_size))
    n_abs_epoch = n_epochs + self._epoch_

    # callbacks before training
    print '\n0th epoch:'
    for callback in callbacks:
        if self._epoch_ % callback.call_freq == 0:
            callback.call()

    while self._epoch_ < n_abs_epoch:
        self._epoch_ += 1

        # train for one epoch
        t1 = time.time()
        loss_list = []
        for i2 in xrange(batch_num):
            batch_x = [e[i2 * batch_size : min((i2 + 1) * batch_size, N)] for e in x]
            batch_y = [e[i2 * batch_size : min((i2 + 1) * batch_size, N)] for e in y]
            in_list = batch_x + batch_y + [1.]   # 1. selects the training phase
            loss = f(*in_list)[0]
            loss_list.append(loss)
            if verbose == 1:
                self._print_progress(self._epoch_, batch_num, i2)
            if verbose == 2:
                self._print_progress_loss(self._epoch_, batch_num, i2, loss)
        t2 = time.time()
        self._tr_time_ += (t2 - t1)
        if verbose != 0:
            print '\n', ' tr_time: ', "%.2f" % (t2 - t1), 's'

        # callbacks after each epoch
        for callback in callbacks:
            if self._epoch_ % callback.call_freq == 0:
                callback.call()
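# Minimal fit call mirroring the signature above; every argument shown is a
# documented default except x_train/y_train, which stand in for your data.
md.fit(x_train, y_train, batch_size=100, n_epochs=10,
       loss_func='categorical_crossentropy',
       optimizer=SGD(lr=0.01, rho=0.9),
       clip=None, callbacks=[], shuffle=True, verbose=1)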
def compile(self, md):
    self._md = md
    self._f = K.function_no_given(md.in_nodes, md.tr_phase_node,
                                  md._layer_seq[3].output)
def _evaluate(self, x, y, eval_type):
    # build one loss node per metric
    loss_nodes = []
    for metric in self._metrics_:
        # default objective given as a string
        if type(metric) is str:
            assert len(self._md_.out_nodes_) == len(self._md_.gt_nodes_), \
                "If you are using default objectives, " \
                + "out_node of out_layers must match ground truth!"
            loss_node = sum([obj.get(metric)(pred_node, gt_node)
                             for pred_node, gt_node
                             in zip(self._md_.out_nodes_, self._md_.gt_nodes_)])
        # user defined objective function
        elif isfunction(metric):
            loss_node = metric(self._md_.out_nodes_, self._md_.any_nodes_,
                               self._md_.gt_nodes_)
        # otherwise the metric is already a node
        else:
            loss_node = metric
        loss_nodes.append(loss_node)

    # compile the evaluation function only once
    if not hasattr(self, '_f_evaluate'):
        print 'compiling evaluation function ..'
        input_nodes = self._md_.in_nodes_ + self._md_.gt_nodes_
        self._f_evaluate = K.function_no_given(input_nodes,
                                               self._md_.tr_phase_node_,
                                               loss_nodes)
        print 'compile finished. '

    # calculate metric values
    t1 = time.time()
    # batch_size=None: put all data on the GPU at once
    if self._batch_size_ is None:
        in_list = x + y + [0.]
        metric_vals = np.array(self._f_evaluate(*in_list))
    # otherwise: evaluate batch by batch, weighting each batch by its size
    else:
        N = len(x[0])
        batch_num = int(np.ceil(float(N) / self._batch_size_))
        metric_vals = np.zeros(len(self._metrics_))
        for i1 in xrange(batch_num):
            curr_batch_size = min((i1 + 1) * self._batch_size_, N) \
                              - i1 * self._batch_size_
            in_list = [e[i1 * self._batch_size_ : min((i1 + 1) * self._batch_size_, N)]
                       for e in x] \
                    + [e[i1 * self._batch_size_ : min((i1 + 1) * self._batch_size_, N)]
                       for e in y] + [0.]
            batch_metric_vals = np.array(self._f_evaluate(*in_list))
            metric_vals += batch_metric_vals * curr_batch_size
            if self._verbose_ == 1:
                self._print_progress(batch_num, i1)
        metric_vals /= N

    # timer
    t2 = time.time()

    # print results
    self._print_time_results(eval_type, self._metrics_, metric_vals, t2 - t1)
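# Why both _evaluate variants weight each batch by its size: with N=250 and
# batch_size=100 the last batch holds only 50 samples, so a plain mean over
# per-batch losses would over-weight it. The numbers below are hypothetical.
batch_losses = [0.90, 0.80, 0.60]
batch_sizes = [100, 100, 50]
weighted = sum(l * n for l, n in zip(batch_losses, batch_sizes)) / 250.  # 0.80
unweighted = sum(batch_losses) / 3.                                     # ~0.77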