class Precision_16_32Test(unittest.TestCase):
    """Tests for the mixed-precision policy '16.32' (fp16 compute, fp32 updates)."""

    def setUp(self):
        self.precision_instance = Precision('16.32')

    def test_half_precision_instanciation(self):
        # '16.32' means: compute in fp16, keep weight updates in fp32.
        assert self.precision_instance.compute_precision == tf.float16
        assert self.precision_instance.weight_update_precision == tf.float32

    def test_set_precision(self):
        # apply() installs the policy globally, so layers created afterwards
        # pick up the fp16 compute dtype.
        self.precision_instance.apply()
        inputs = tf.keras.layers.Input(shape=(None, 1))
        dense = tf.keras.layers.Dense(1)
        assert dense.compute_dtype == tf.float16
        outputs = dense(inputs)
        assert outputs.dtype == tf.float16
        model = tf.keras.Model(inputs=inputs, outputs=outputs)
        with tf.GradientTape() as tape:
            sample = np.array([[1]])
            predictions = model(sample)
        gradients = tape.gradient(predictions, model.trainable_variables)
        # note gradients dtype are tf.float32
        assert all(g.dtype == tf.float32 for g in gradients)
        # note that model weights are also tf.float32
        assert all(v.dtype == tf.float32 for v in model.trainable_variables)
class Precision32_32Test(unittest.TestCase):
    """Tests for the full-precision policy '32.32' (fp32 everywhere)."""

    def setUp(self):
        self.precision_instance = Precision('32.32')

    def test_full_precision_instantiation(self):
        # Both compute and weight-update precision are fp32.
        assert self.precision_instance.compute_precision == tf.float32
        assert self.precision_instance.weight_update_precision == tf.float32

    def test_set_precision(self):
        # After apply(), newly created layers, their outputs and gradients
        # should all be fp32.
        self.precision_instance.apply()
        inputs = tf.keras.layers.Input(shape=(1))
        dense = tf.keras.layers.Dense(1)
        assert dense.dtype == tf.float32
        outputs = dense(inputs)
        assert outputs.dtype == tf.float32
        model = tf.keras.Model(inputs=inputs, outputs=outputs)
        with tf.GradientTape() as tape:
            sample = np.array([[1]])
            predictions = model(sample)
        gradients = tape.gradient(predictions, model.trainable_variables)
        assert all(g.dtype == tf.float32 for g in gradients)
def test_datenum(self):
    """Both datenum() call forms must convert every generated datetime.

    Builds a range of datetimes, converts them once via date objects and
    once via (year, month, day) components, and checks both conversions
    produce the full 2530 values.
    """
    p = Precision()
    d = [i for i in p.dtrange(datetime(2018, 6, 12),
                              datetime(2025, 12, 12),
                              {'days': 1, 'hours': 2})]
    x = [p.datenum(i.date()) for i in d]
    self.assertEqual(len(x), 2530, 'Failed datenum!')
    y = [p.datenum(i.year, i.month, i.day) for i in d]
    # BUG FIX: the second assertion previously re-checked len(x); it must
    # verify the (year, month, day) form's output, len(y).
    self.assertEqual(len(y), 2530, 'Failed datenum 2!')
def test_unique(self):
    """unique() supports an offset-dict form, a plain-list form, and a
    numpy form returning (values, first-indices, original, counts)."""
    p = Precision()
    data = [0, 1, 1, 2, 3, 4, 4, 5, 5, 6, 7, 7, 7]
    offset_expected = {0: 4, 1: 5, 2: 6, 3: 7, 4: 8, 5: 9, 6: 10, 7: 11}
    self.assertEqual(offset_expected, p.unique(data, 4), 'Failed unique 1!')
    self.assertEqual([0, 1, 2, 3, 4, 5, 6, 7], p.unique(data),
                     'Failed unique 2!')
    result = p.unique(numpy.array([data]))
    self.assertEqual([0, 1, 2, 3, 4, 5, 6, 7],
                     list(result[0][0].flatten()), 'Failed unique 3!')
    self.assertEqual([0, 1, 3, 4, 5, 7, 9, 10],
                     list(result[0][1].flatten()), 'Failed unique 4!')
    self.assertEqual([0, 1, 1, 2, 3, 4, 4, 5, 5, 6, 7, 7, 7],
                     list(result[0][2].flatten()), 'Failed unique 5!')
    self.assertEqual([1, 2, 1, 1, 2, 2, 1, 3],
                     list(result[0][3].flatten()), 'Failed unique 6!')
def test_num2cell(self):
    """num2cell() should yield one cell per input row."""
    p = Precision()
    small = numpy.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], numpy.int64)
    cells = p.num2cell(small)
    self.assertEqual(3, len(cells), 'Failed num2cell!')
    large = numpy.ones((170, 30))
    cells = p.num2cell(large)
    self.assertEqual(170, len(cells), 'Failed num2cell!')
def test_overlap1d(self):
    """overlap1d() reports positions where both sequences share a value;
    the third-argument form returns per-element index lists instead."""
    p = Precision()
    left = ['A', 'B', 'A']
    right = ['A', 'D', 'A']
    self.assertEqual([[0, ('A', 'A')], [2, ('A', 'A')]],
                     p.overlap1d(left, right), 'Failed overlap1d 1!')
    self.assertEqual([[0, 2], [], [0, 2]],
                     list(p.overlap1d(left, right, 0)),
                     'Failed overlap1d 2!')
def test_zero_or_one(self):
    """zero_or_one() builds a 0/1 indicator list for a target value."""
    p = Precision()
    labels = ['A', 'B', 'A', 'B', 'C']
    self.assertEqual([1, 0, 1, 0, 0], p.zero_or_one(labels, 'A'),
                     'Failed zero or one A!')
    self.assertEqual([0, 1, 0, 1, 0], p.zero_or_one(labels, 'B'),
                     'Failed zero or one B!')
    self.assertEqual([0, 0, 0, 0, 1], p.zero_or_one(labels, 'C'),
                     'Failed zero or one C!')
def test_histc(self):
    """histc() returns (counts, bin_edges) over amax(v)+1 bins.

    Note: v is declared with float literals but int64 dtype, so the
    values are truncated before binning.
    """
    p = Precision()
    v = numpy.array([[1.5, 2.0, 3], [4, 5.9, 6]], numpy.int64)
    x = p.histc(v, numpy.amax(v) + 1)
    self.assertEqual([1, 1, 1, 0, 1, 1, 1], list(x[0].flatten()),
                     'Failed histc 1!')
    # BUG FIX: this message previously duplicated 'Failed histc 1!',
    # making a failure of the edges check look like a counts failure.
    self.assertEqual([
        1.0, 1.7142857142857144, 2.428571428571429, 3.142857142857143,
        3.857142857142857, 4.571428571428571, 5.285714285714286, 6.0
    ], list(x[1].flatten()), 'Failed histc 2!')
def test_strcat(self):
    """strcat() stringifies a DataFrame column into a list of str.

    BUG FIX: the failure messages previously said 'Failed num2cell ...',
    copy-pasted from the num2cell test; they now name strcat.
    """
    p = Precision()
    df = pandas.DataFrame(data={
        'A': [1, 2],
        'B': [3, 4]
    }, dtype=numpy.int8)
    self.assertEqual(['1', '2'], p.strcat(df, 'A'), 'Failed strcat A!')
    self.assertEqual(['3', '4'], p.strcat(df, 'B'), 'Failed strcat B!')
def test_full_precision(self):
    """With the '32.32' policy the model output must match the expected
    value closely — i.e. the computation did NOT underflow."""
    policy = Precision('32.32')
    policy.apply()
    model = self.get_model()
    computed_value = model(self.input_value).numpy()
    # checking it did NOT underflow
    self.assertAlmostEqual(self.expected_value[0][0],
                           computed_value[0][0],
                           places=10)
def test_mixed_precision(self):
    """With the '16.32' policy the fp16 compute path is expected to
    underflow, so the output must NOT match to 10 places."""
    policy = Precision('16.32')
    policy.apply()
    model = self.get_model()
    computed_value = model(self.input_value).numpy()
    # checking for underflow
    self.assertNotAlmostEqual(self.expected_value[0][0],
                              computed_value[0][0],
                              places=10)
def test_half_precision_update(self):
    """Under '16.16' the weight update itself runs in fp16, so a tiny
    update is lost and the weight stays at its initial value."""
    Precision('16.16').apply()
    weight = self.perform_weight_update()
    # weight didn't change, weight update done in 16 bits
    self.assertEqual(self.weight_initial_value, weight)
def test_mixed_precision_update(self):
    """Under '16.32' the weight update runs in fp32, so even a tiny
    update survives and the weight moves from its initial value."""
    Precision('16.32').apply()
    weight = self.perform_weight_update()
    # weight changed, weight update done in 32 bits
    self.assertNotEqual(self.weight_initial_value, weight)
def test_prctile(self):
    """prctile() percentiles over a long datenum series."""
    p = Precision()
    dates = list(p.dtrange(datetime(2018, 6, 12),
                           datetime(2059, 12, 12),
                           {'days': 1, 'hours': 2}))
    nums = [p.datenum(day.date()) for day in dates]
    self.assertEqual(len(nums), 13992, 'Failed datenum!')
    low = p.prctile(nums, 5)
    high = p.prctile(nums, 95)
    spread = high - low
    self.assertEqual(low, 737980.1, 'Failed prctile 5 low!')
    self.assertEqual(high, 751621.9, 'Failed prctile 95 high!')
    self.assertEqual(spread, 13641.800000000047, 'Failed prctile delta r!')
tensorboard = TensorBoard(log_dir=os.path.join("logs", model_name)) history = model.fit(data["X_train"], data["y_train"], batch_size=ObtenerDatos.BATCH_SIZE, epochs=ObtenerDatos.EPOCHS, validation_data=(data["X_test"], data["y_test"]), callbacks=[checkpointer, tensorboard], verbose=1) model.save(os.path.join("results", model_name) + ".h5") # evaluate the model mse, mae = model.evaluate(data["X_test"], data["y_test"], verbose=0) # calculate the mean absolute error (inverse scaling) mean_absolute_error = data["column_scaler"]["close"].inverse_transform( [[mae]])[0][0] precision = Precision() # Predecir path_model = "results/" + model_name + ".h5" # predicted_price = predicted_price_open, predicted_price_high, predicted_price_low, predicted_price_close new_model = keras.models.load_model(path_model) # priceOpen = predict(new_model, data)[0] priceHigh = predict(new_model, data)[1] priceLow = predict(new_model, data)[2] priceClose = predict(new_model, data)[3] # Registrar datos del modelo ReporteTensorflow( symbol=ObtenerDatos.ticker_n, update_model=f'{ObtenerDatos.from_date} - {ObtenerDatos.to_date}',
def test_strcmp(self):
    """strcmp() returns True for equal strings, False otherwise."""
    p = Precision()
    self.assertEqual(True, p.strcmp('A', 'A'), 'Failed strcmp 0!')
    self.assertEqual(False, p.strcmp('B', 'C'), 'Failed strcmp 1!')
def test_sprintf(self):
    """sprintf() applies printf-style formatting."""
    p = Precision()
    self.assertEqual('50', p.sprintf('%d', 50), 'Failed sprintf %d!')
    self.assertEqual('WORK', p.sprintf('%s', 'WORK'), 'Failed sprintf %s!')
def test_num2str(self):
    """num2str() converts ints and floats to their string form."""
    p = Precision()
    self.assertEqual('5', p.num2str(5), 'Failed str to num!')
    self.assertEqual('5.2', p.num2str(5.2), 'Failed str to float!')
    self.assertEqual('5.459999', p.num2str(5.459999), 'Failed str to float!')
def test_str2num(self):
    """str2num() parses numeric strings into ints/floats."""
    p = Precision()
    self.assertEqual(5, p.str2num('5'), 'Failed int to str!')
    self.assertEqual(5.2, p.str2num('5.2'), 'Failed float to str!')
    self.assertEqual(5.459999, p.str2num('5.459999'), 'Failed float to str!')
def test_cell2mat(self):
    """cell2mat() accepts nested lists and MATLAB-style matrix strings."""
    p = Precision()
    matrix = [[1, 2], [3, 4]]
    self.assertEqual(len(matrix), len(p.cell2mat(matrix)), 'Failed matrix!')
    self.assertEqual(2, len(p.cell2mat('1 2; 3 4')), 'Failed str!')
def test_tic_toc(self):
    """tic()/toc() stopwatch pair should run without raising."""
    stopwatch = Precision()
    stopwatch.tic()
    time.sleep(2)  # give toc a measurable elapsed interval
    stopwatch.toc()
def test_overlap2d(self):
    """overlap2d() returns matching index arrays for the two inputs."""
    p = Precision()
    idx_a, idx_b = p.overlap2d(numpy.array([1, 2, 4, 5]),
                               numpy.array([4, 6, 10, 9, 1]))
    self.assertEqual([0, 2], list(idx_a.flatten()), 'Failed overlap2d 1!')
    self.assertEqual([4, 0], list(idx_b.flatten()), 'Failed overlap2d 2!')
def setUp(self):
    # Build a fresh mixed-precision policy for every test; '16.32' selects
    # fp16 compute with fp32 weight updates (see the Precision tests above).
    self.precision_instance = Precision('16.32')
mean_throughput = np.mean( [epoch['elements_per_second'] for epoch in results_dict['epochs']]) * micro_batch_size return mean_throughput if __name__ == '__main__': parser = argparse.ArgumentParser( description='TF2 classification dataset benchmark') parser = add_arguments(parser) args = parser.parse_args() logging.basicConfig(level=logging.INFO) logging.info(f'args = {args}') fp_precision = Precision(args.precision) eight_bit_transfer = EightBitTransfer( compute_precision=fp_precision.compute_precision ) if args.eight_bit_transfer else None ds, _, ds_size, _ = DatasetFactory.get_dataset( dataset_name=args.dataset, dataset_path=args.dataset_path, split=args.split, img_datatype=fp_precision.compute_precision, micro_batch_size=args.micro_batch_size, eight_bit_transfer=eight_bit_transfer) throughput = estimate_ds_throughput(ds, ds_size, args.num_epochs, args.micro_batch_size)