Example #1
    def _cast_params_2_next_dtype(self):
        # Find the next dtype in the precision schedule
        # (assumes current_dtype is not the last entry in self.dtypes).
        ix = self.dtypes.index(self.current_dtype)
        current_dtype_nm = dtype2str(self.current_dtype)
        next_dtype = self.dtypes[ix + 1]
        next_dtype_nm = dtype2str(next_dtype)

        # Cast current weights to new dtype
        self.weights[next_dtype_nm] = cast_params(self.weights[current_dtype_nm], next_dtype)
        self.biases[next_dtype_nm] = cast_params(self.biases[current_dtype_nm], next_dtype)
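These excerpts come from two codebases (a TensorFlow mixed-precision trainer and a Taichi benchmark suite), and both call a dtype2str helper that the snippets never define. For the TensorFlow side, a minimal sketch might look like this; it is assumed for illustration, not the source's actual implementation, and cast_params is inferred from its call in Example #1:

import tensorflow as tf

def dtype2str(dtype):
    # e.g. tf.float32 -> 'float32' (assumed behavior)
    return tf.as_dtype(dtype).name

def cast_params(params, dtype):
    # Cast each named parameter tensor to the target dtype
    # (signature assumed from the call in Example #1).
    return {name: tf.cast(p, dtype) for name, p in params.items()}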
Example #2
    def __call__(self, x, training=True):
        # Run the network stored for the current precision on a NumPy input
        # cast to match; note the training flag is not forwarded here.
        dtype_nm = dtype2str(self.current_dtype)

        np_dtype = tf2np_dtypes[self.current_dtype]
        nn = self.NN[dtype_nm]

        return nn(x.astype(np_dtype))
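Examples #2, #4 and #5 also depend on a tf2np_dtypes lookup table mapping TensorFlow dtypes to their NumPy counterparts. A plausible definition, assumed for illustration (the real table may cover more types):

import numpy as np
import tensorflow as tf

# Assumed mapping from TF dtypes to NumPy dtypes, used to cast inputs.
tf2np_dtypes = {
    tf.float16: np.float16,
    tf.float32: np.float32,
    tf.float64: np.float64,
}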
Example #3
    def get_markdown_lines(self):
        # Build one markdown table row for this case/dtype pair.
        string = '|' + self._name + '.' + dtype2str(self._test_dtype) + '|'
        string += ''.join(
            str(round(time, 4)) + '|' for time in self._min_time_in_us)
        string += ''.join(
            str(round(item(self._min_time_in_us), 4)) + '|'
            for item in self._evaluator)
        return [string]
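With two test sizes and two evaluators, the row built above follows this pattern, where saxpy and f32 are example names borrowed from the comments in Example #10 and the cell values are placeholders:

|saxpy.f32|<min time, dsize 1>|<min time, dsize 2>|<evaluator 1 result>|<evaluator 2 result>|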
Example #4
    def predict(self, x, batch_size, training=False):
        print('predicting for {}'.format(self.current_dtype))
        dtype_nm = dtype2str(self.current_dtype)

        np_dtype = tf2np_dtypes[self.current_dtype]
        nn = self.NN[dtype_nm]

        return nn.predict(x.astype(np_dtype), batch_size, training)
Example #5
    def _train_epoch_dtype(self, sess, x):
        dtype_nm = dtype2str(self.current_dtype)
        np_dtype = tf2np_dtypes[self.current_dtype]
        x_ = x.astype(np_dtype)

        # Iterate over full minibatches; any trailing remainder is dropped.
        for i in range(x_.shape[0] // self.batch_size):
            x_mb = x_[i * self.batch_size:(i + 1) * self.batch_size]
            sess.run(self.train_step[dtype_nm], feed_dict={self.x[dtype_nm]: x_mb})
Example #6
    def _init_tf_vars(self):
        # Wire up prediction, cost, gradients and the train op for the active dtype.
        dtype_nm = dtype2str(self.current_dtype)
        self.y_pred[dtype_nm] = self.NN[dtype_nm](self.x[dtype_nm])

        self.cost[dtype_nm] = self.cost_fn(self.y_true[dtype_nm], self.y_pred[dtype_nm])

        self.grads[dtype_nm] = self.opt_fn.compute_gradients(self.cost[dtype_nm])
        self.train_step[dtype_nm] = self.opt_fn.apply_gradients(self.grads[dtype_nm])
Example #7
    def _init_NN(self, dtype=None):
        dtype_nm = dtype2str(self.current_dtype)
        self.NN[dtype_nm] = NeuralNet(self.n_nodes,
                                      self.l_act,
                                      self.weights[dtype_nm],
                                      self.biases[dtype_nm],
                                      self.current_dtype,
                                      with_bn=self.with_bn,
                                      dropout_rate=self.drop_rate)
Example #8
    def _cast_NN_2_next_dtype(self):
        # Rebuild the network wrapper around the freshly cast parameters.
        dtype_nm = dtype2str(self.current_dtype)

        self.NN[dtype_nm] = NeuralNet(self.n_nodes,
                                      self.l_act,
                                      self.weights[dtype_nm],
                                      self.biases[dtype_nm],
                                      self.current_dtype,
                                      with_bn=self.with_bn,
                                      dropout_rate=self.drop_rate)
Example #9
    def run(self):
        ti.init(kernel_profiler=True, arch=self._arch)
        print("TestCase[%s.%s.%s]" % (self._func.__name__, arch_name(
            self._arch), dtype2str(self._test_dtype)))
        for test_dsize in self._test_dsize_list:
            print("test_dsize = %s" % size2str(test_dsize))
            self._min_time_in_us.append(
                self._func(self._arch, self._test_dtype, test_dsize,
                           MemoryBound.basic_repeat_times))
            time.sleep(0.2)
        ti.reset()
Example #10
    def _save_cases_info_as_json(self, suite_path='./'):
        for case in self.test_cases:  # cases: [fill, saxpy, reduction]
            results_dict = {}
            for impl in self._cases_impl:  # impls per dtype: [ti.i32, ti.i64, ti.f32, ti.f64]
                if impl._name != case.__name__:
                    continue
                result_name = dtype2str(impl._test_dtype)
                results_dict[result_name] = impl.get_results_dict()
            case_path = os.path.join(suite_path, case.__name__ + '.json')
            with open(case_path, 'w') as f:
                case_str = dump2json(results_dict)
                print(case_str, file=f)
Example #11
    def _save_suite_info_as_json(self, suite_path='./'):
        info_dict = {
            'cases': [func.__name__ for func in self.test_cases],
            'dtype': [dtype2str(dtype) for dtype in self.test_dtype_list],
            'dsize': list(self.test_dsize_list),
            'repeat': [
                scaled_repeat_times(self._arch, size, self.basic_repeat_times)
                for size in self.test_dsize_list
            ],
            'evaluator': [func.__name__ for func in self.evaluator]
        }
        info_path = os.path.join(suite_path, '_info.json')
        with open(info_path, 'w') as f:
            print(dump2json(info_dict), file=f)
Example #12
    def train_init(self,
                   dim_input,
                   y,
                   learning_rate,
                   opt_nm,
                   cost_nm,
                   batch_size,
                   n_epochs_per_precision,
                   dtype=None):
        self.lr = learning_rate
        self.batch_size = batch_size
        if isinstance(n_epochs_per_precision, list):
            self.n_epochs = n_epochs_per_precision
        else:
            self.n_epochs = [n_epochs_per_precision]

        assert len(self.n_epochs) == 1 or len(self.n_epochs) == self.len_dtypes, 'n_epochs must have length 1 or match the number of dtypes'
        if len(self.n_epochs) == 1 and self.len_dtypes > 1:
            self.n_epochs = self.n_epochs * self.len_dtypes

        self.x, self.y_pred, self.y_true = {}, {}, {}
        self.cost, self.grads, self.train_step = {}, {}, {}

        if opt_nm == 'adam':
            self.opt_fn = tf.train.AdamOptimizer(learning_rate=learning_rate, name=opt_nm)
        elif opt_nm == 'sgd':
            self.opt_fn = tf.train.GradientDescentOptimizer(learning_rate, name=opt_nm)
        elif opt_nm == 'mom':
            self.opt_fn = tf.train.MomentumOptimizer(learning_rate, momentum=0.9, name=opt_nm)
        elif opt_nm == 'nest_mom':
            self.opt_fn = tf.train.MomentumOptimizer(learning_rate, momentum=0.9, use_nesterov=True, name=opt_nm)
        else:
            raise NotImplementedError('unknown optimizer: {}'.format(opt_nm))

        if cost_nm == 'mse':
            self.cost_fn = lambda y_t, y_p: tf.reduce_mean((y_t - y_p) ** 2)
        else:
            raise NotImplementedError('unknown cost function: {}'.format(cost_nm))

        # One input placeholder and one target constant per precision in the schedule.
        for dtype in self.dtypes:
            dtype_nm = dtype2str(dtype)

            self.x[dtype_nm] = tf.placeholder(dtype, shape=(None, dim_input), name='x'+dtype_nm)
            self.y_true[dtype_nm] = tf.constant(y, dtype=dtype, name='y_true'+dtype_nm)

        self._init_tf_vars()

        self.train_inited = True
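For orientation, a hypothetical call to train_init might look like the following; model, y_train and every argument value are assumptions for illustration, not taken from the source:

# Hypothetical usage sketch; all names and values are assumed.
model.train_init(dim_input=784,
                 y=y_train,
                 learning_rate=1e-3,
                 opt_nm='adam',
                 cost_nm='mse',
                 batch_size=128,
                 n_epochs_per_precision=[10, 10, 5])

Passing a single integer instead of a list would be broadcast to every precision in the schedule, per the length check above.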
Example #13
    def _init_params(self, dtype=None):
        dtype_nm = dtype2str(self.current_dtype)
        with tf.name_scope('network_parameters'):
            if self.w_init_method is None and self.b_init_method is None:
                self.weights[dtype_nm] = init_param(self.n_nodes,
                                                    params='weights',
                                                    trainable=self.trainable,
                                                    dtype=self.current_dtype,
                                                    tb_flag=self.tb_flag)
                self.biases[dtype_nm] = init_param(self.n_nodes,
                                                   params='biases',
                                                   trainable=self.trainable,
                                                   dtype=self.current_dtype,
                                                   tb_flag=self.tb_flag)
            else:
                raise NotImplementedError