Example #1
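All six examples assume a common preamble of imports and module-level globals that the snippets use but never define, plus project-specific classes (YOLOv3, YOLOv5_large, Loss, WaveNet, CycleGAN, Predictor) assumed to come from the surrounding repository. A minimal sketch of that preamble; the concrete values of batch_size and dataset_size are assumptions to tune per machine:

import os
import numpy as np
import pandas as pd
import cv2
import tensorflow as tf
import tensorflow_datasets as tfds
from os import listdir, mkdir
from os.path import join, exists

batch_size = 8       # assumption: tune to available GPU memory
dataset_size = 1000  # assumption: images per domain, used by Example #5's schedule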
def main():
  # mirror the model over all visible GPUs
  strategy = tf.distribute.MirroredStrategy()
  # yolov3 model
  with strategy.scope():
    yolov3 = YOLOv3((416, 416, 3), 80)
  @tf.function
  def loss(labels, outputs):
    # wrap the three-scale YOLOv3 loss so Keras can call it as an ordinary loss function
    return Loss((416, 416, 3), 80)([outputs[0], outputs[1], outputs[2], labels[0], labels[1], labels[2]])
  yolov3.compile(optimizer=tf.keras.optimizers.Adam(1e-4), loss=loss)
  # load downloaded dataset
  trainset_filenames = [join('trainset', filename) for filename in listdir('trainset')]
  testset_filenames = [join('testset', filename) for filename in listdir('testset')]
  trainset = tf.data.TFRecordDataset(trainset_filenames).map(parse_function_generator(80)).repeat(-1).shuffle(batch_size).batch(batch_size).prefetch(tf.data.experimental.AUTOTUNE)
  testset = tf.data.TFRecordDataset(testset_filenames).map(parse_function_generator(80)).repeat(-1).shuffle(batch_size).batch(batch_size).prefetch(tf.data.experimental.AUTOTUNE)
  # note: with repeat(-1) both datasets are infinite, so fit also needs
  # steps_per_epoch and validation_steps to delimit an epoch
  yolov3.fit(trainset, epochs=100, validation_data=testset)
  yolov3.save('yolov3.h5')
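parse_function_generator is never shown in these examples. A minimal sketch of what it is assumed to do for the YOLOv3 pipeline: decode one serialized TFRecord example into a 416x416 image and three pre-computed per-scale label tensors. The feature keys and label layout here are assumptions, not the repository's actual schema:

def parse_function_generator(num_classes):
  def parse_function(serialized_example):
    feature = tf.io.parse_single_example(
      serialized_example,
      features={
        'image': tf.io.FixedLenFeature((), dtype=tf.string),
        'label1': tf.io.FixedLenFeature((), dtype=tf.string),
        'label2': tf.io.FixedLenFeature((), dtype=tf.string),
        'label3': tf.io.FixedLenFeature((), dtype=tf.string)})
    image = tf.cast(tf.io.decode_jpeg(feature['image']), dtype=tf.float32)
    # each label tensor has shape (grid, grid, anchors, 5 + num_classes)
    label1 = tf.reshape(tf.io.parse_tensor(feature['label1'], out_type=tf.float32), (13, 13, 3, 5 + num_classes))
    label2 = tf.reshape(tf.io.parse_tensor(feature['label2'], out_type=tf.float32), (26, 26, 3, 5 + num_classes))
    label3 = tf.reshape(tf.io.parse_tensor(feature['label3'], out_type=tf.float32), (52, 52, 3, 5 + num_classes))
    return image, (label1, label2, label3)
  return parse_function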
Example #2
def train():
    category = pd.read_pickle('category.pkl')
    dilations = [2**i for i in range(10)] * 5
    receptive_field = calculate_receptive_field(dilations, 2, 32)
    wavenet = WaveNet(dilations=dilations,
                      use_glob_cond=True,
                      glob_cls_num=len(category),
                      glob_embed_dim=5)
    optimizer = tf.keras.optimizers.Adam(1e-3)
    # load dataset
    trainset = tf.data.TFRecordDataset(join(
        'dataset', 'trainset.tfrecord')).repeat(-1).map(
            parse_function_generator()).batch(batch_size).prefetch(
                tf.data.experimental.AUTOTUNE)
    # restore from existing checkpoint
    if not exists('checkpoints'): mkdir('checkpoints')
    checkpoint = tf.train.Checkpoint(model=wavenet, optimizer=optimizer)
    checkpoint.restore(tf.train.latest_checkpoint('checkpoints'))
    # create log
    log = tf.summary.create_file_writer('checkpoints')
    # train model
    avg_loss = tf.keras.metrics.Mean(name='loss', dtype=tf.float32)
    for audios, person_id in trainset:
        inputs = audios[:, :-1, :]
        # inputs.shape = (batch, receptive_field + audio_length - 1, 1)
        target = audios[:, receptive_field:, :]
        # target.shape = (batch, audio_length, 1)
        with tf.GradientTape() as tape:
            outputs = wavenet([inputs, person_id])
            # outputs.shape = (batch, audio_length, quantization levels)
            loss = tf.keras.losses.SparseCategoricalCrossentropy()(target,
                                                                   outputs)
        avg_loss.update_state(loss)
        # write log
        if tf.equal(optimizer.iterations % 100, 0):
            with log.as_default():
                tf.summary.scalar('loss',
                                  avg_loss.result(),
                                  step=optimizer.iterations)
            print('Step #%d Loss: %.6f' %
                  (optimizer.iterations, avg_loss.result()))
            if avg_loss.result() < 0.01: break
            avg_loss.reset_states()
        grads = tape.gradient(loss, wavenet.trainable_variables)
        optimizer.apply_gradients(zip(grads, wavenet.trainable_variables))
    # save the network structure with weights
    if not exists('model'): mkdir('model')
    wavenet.save(join('model', 'wavenet.h5'))
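calculate_receptive_field is not defined here either. For a stack of dilated causal convolutions it is conventionally computed as below; the argument order is inferred from the call above (dilations, filter width 2, initial causal filter width 32):

def calculate_receptive_field(dilations, filter_width, initial_filter_width):
    # each dilated layer with kernel size k and dilation d widens the
    # receptive field by (k - 1) * d; the initial causal convolution
    # contributes its own kernel width on top of that
    return (filter_width - 1) * sum(dilations) + initial_filter_width

# e.g. with dilations = [1, 2, ..., 512] * 5, filter width 2 and initial
# width 32 this gives 5 * 1023 + 32 = 5147 samples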
Example #3
def main():
    yolov5l = YOLOv5_large((608, 608, 3), 80)
    # one loss per detection scale
    loss1 = Loss((608, 608, 3), 0, 80)
    loss2 = Loss((608, 608, 3), 1, 80)
    loss3 = Loss((608, 608, 3), 2, 80)
    if exists('./checkpoints/ckpt'):
        yolov5l.load_weights('./checkpoints/ckpt/variables/variables')
    optimizer = tf.keras.optimizers.Adam(1e-4)
    yolov5l.compile(optimizer=optimizer,
                    loss={
                        'output1':
                        lambda labels, outputs: loss1([outputs, labels]),
                        'output2':
                        lambda labels, outputs: loss2([outputs, labels]),
                        'output3':
                        lambda labels, outputs: loss3([outputs, labels])
                    })

    class SummaryCallback(tf.keras.callbacks.Callback):
        def __init__(self, eval_freq=100):
            super(SummaryCallback, self).__init__()
            self.eval_freq = eval_freq
            testset = tf.data.TFRecordDataset(testset_filenames).map(
                parse_function).repeat(-1)
            self.iter = iter(testset)
            self.train_loss = tf.keras.metrics.Mean(name='train loss',
                                                    dtype=tf.float32)
            self.log = tf.summary.create_file_writer('./checkpoints')

        def on_batch_end(self, batch, logs=None):
            self.train_loss.update_state(logs['loss'])
            if batch % self.eval_freq == 0:
                image, bbox, labels = next(self.iter)
                image = image.numpy().astype('uint8')
                predictor = Predictor(yolov5l=yolov5l)
                boundings = predictor.predict(image)
                color_map = dict()
                for bounding in boundings:
                    if bounding[5].numpy().astype('int32') not in color_map:
                        color_map[bounding[5].numpy().astype('int32')] = tuple(
                            np.random.randint(low=0, high=256,
                                              size=(3, )).tolist())
                    clr = color_map[bounding[5].numpy().astype('int32')]
                    cv2.rectangle(image,
                                  tuple(bounding[0:2].numpy().astype('int32')),
                                  tuple(bounding[2:4].numpy().astype('int32')),
                                  clr, 1)
                    cv2.putText(
                        image,
                        predictor.getClsName(
                            bounding[5].numpy().astype('int32')),
                        tuple(bounding[0:2].numpy().astype('int32')),
                        cv2.FONT_HERSHEY_PLAIN, 1, clr, 2)
                image = tf.expand_dims(image, axis=0)
                with self.log.as_default():
                    tf.summary.scalar('train loss',
                                      self.train_loss.result(),
                                      step=optimizer.iterations)
                    tf.summary.image('detect',
                                     image[..., ::-1],
                                     step=optimizer.iterations)
                self.train_loss.reset_states()

    # load downloaded dataset
    trainset_filenames = [
        join('trainset', filename) for filename in listdir('trainset')
    ]
    testset_filenames = [
        join('testset', filename) for filename in listdir('testset')
    ]
    trainset = tf.data.TFRecordDataset(trainset_filenames).map(
        parse_function_generator(80)).shuffle(batch_size).batch(
            batch_size).prefetch(tf.data.experimental.AUTOTUNE)
    testset = tf.data.TFRecordDataset(testset_filenames).map(
        parse_function_generator(80)).shuffle(batch_size).batch(
            batch_size).prefetch(tf.data.experimental.AUTOTUNE)
    callbacks = [
        tf.keras.callbacks.TensorBoard(log_dir='./checkpoints'),
        tf.keras.callbacks.ModelCheckpoint(filepath='./checkpoints/ckpt',
                                           save_freq=10000),
        SummaryCallback(),
    ]
    yolov5l.fit(trainset,
                epochs=100,
                validation_data=testset,
                callbacks=callbacks)
    yolov5l.save('yolov5l.h5')
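The visualization stream inside SummaryCallback maps with parse_function, which yields a single raw image with its ground-truth boxes and class ids rather than training targets. A sketch under the same assumed TFRecord layout as above:

def parse_function(serialized_example):
    feature = tf.io.parse_single_example(
        serialized_example,
        features={
            'image': tf.io.FixedLenFeature((), dtype=tf.string),
            'bbox': tf.io.VarLenFeature(dtype=tf.float32),
            'label': tf.io.VarLenFeature(dtype=tf.int64)})
    image = tf.io.decode_jpeg(feature['image'])
    # boxes are assumed stored as flattened (x1, y1, x2, y2) quadruples
    bbox = tf.reshape(tf.sparse.to_dense(feature['bbox']), (-1, 4))
    label = tf.sparse.to_dense(feature['label'])
    return image, bbox, label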
Example #4
def main():
    # allow GPU memory to grow on demand
    gpus = tf.config.experimental.list_physical_devices('GPU')
    for gpu in gpus:
        tf.config.experimental.set_memory_growth(gpu, True)
    # yolov5l model
    yolov5l = YOLOv5_large((608, 608, 3), 80)
    loss1 = Loss((608, 608, 3), 0, 80)
    loss2 = Loss((608, 608, 3), 1, 80)
    loss3 = Loss((608, 608, 3), 2, 80)
    #optimizer = tf.keras.optimizers.Adam(tf.keras.optimizers.schedules.ExponentialDecay(1e-5, decay_steps = 110000, decay_rate = 0.99));
    optimizer = tf.keras.optimizers.Adam(1e-5)
    checkpoint = tf.train.Checkpoint(model=yolov5l, optimizer=optimizer)
    train_loss = tf.keras.metrics.Mean(name='train loss', dtype=tf.float32)
    test_loss = tf.keras.metrics.Mean(name='test loss', dtype=tf.float32)
    # load downloaded dataset
    trainset_filenames = [
        join('trainset', filename) for filename in listdir('trainset')
    ]
    testset_filenames = [
        join('testset', filename) for filename in listdir('testset')
    ]
    trainset = tf.data.TFRecordDataset(trainset_filenames).map(
        parse_function_generator(80)).repeat(-1).shuffle(batch_size).batch(
            batch_size).prefetch(tf.data.experimental.AUTOTUNE)
    testset = tf.data.TFRecordDataset(testset_filenames).map(
        parse_function_generator(80)).repeat(-1).shuffle(batch_size).batch(
            batch_size).prefetch(tf.data.experimental.AUTOTUNE)
    validationset = tf.data.TFRecordDataset(testset_filenames).map(
        parse_function).repeat(-1)
    trainset_iter = iter(trainset)
    testset_iter = iter(testset)
    validationset_iter = iter(validationset)
    # restore from existing checkpoint
    if not exists('checkpoints'): mkdir('checkpoints')
    checkpoint.restore(tf.train.latest_checkpoint('checkpoints'))
    # tensorboard summary
    log = tf.summary.create_file_writer('checkpoints')
    # train model
    while True:
        images, labels = next(trainset_iter)
        labels1, labels2, labels3 = labels
        with tf.GradientTape() as tape:
            outputs1, outputs2, outputs3 = yolov5l(images)
            loss = loss1([outputs1, labels1]) + loss2(
                [outputs2, labels2]) + loss3([outputs3, labels3])
        # check that the loss is numerically valid
        if tf.math.reduce_any(tf.math.is_nan(loss)):
            print("NaN was detected in loss, skip the following steps!")
            continue
        grads = tape.gradient(loss, yolov5l.trainable_variables)
        # check that the gradients are numerically valid
        if tf.math.reduce_any(
            [tf.math.reduce_any(tf.math.is_nan(grad)) for grad in grads]):
            print("NaN was detected in gradients, skip gradient apply!")
            continue
        optimizer.apply_gradients(zip(grads, yolov5l.trainable_variables))
        train_loss.update_state(loss)
        # save model
        if tf.equal(optimizer.iterations % 10000, 0):
            # save checkpoint every 10000 steps
            checkpoint.save(join('checkpoints', 'ckpt'))
            yolov5l.save('yolov5l.h5')
        if tf.equal(optimizer.iterations % 100, 0):
            # evaluate
            for i in range(10):
                images, labels = next(testset_iter)
                # images.shape = (b, h, w, 3)
                outputs = yolov5l(images)
                loss = loss1([outputs[0], labels[0]]) + loss2(
                    [outputs[1], labels[1]]) + loss3([outputs[2], labels[2]])
                test_loss.update_state(loss)
            # visualize
            image, bbox, labels = next(validationset_iter)
            # image.shape = (h, w, 3)
            image = image.numpy().astype('uint8')
            predictor = Predictor(yolov5l=yolov5l)
            boundings = predictor.predict(image)
            color_map = dict()
            for bounding in boundings:
                if bounding[5].numpy().astype('int32') not in color_map:
                    color_map[bounding[5].numpy().astype('int32')] = tuple(
                        np.random.randint(low=0, high=256,
                                          size=(3, )).tolist())
                clr = color_map[bounding[5].numpy().astype('int32')]
                cv2.rectangle(image,
                              tuple(bounding[0:2].numpy().astype('int32')),
                              tuple(bounding[2:4].numpy().astype('int32')),
                              clr, 1)
                cv2.putText(
                    image,
                    predictor.getClsName(bounding[5].numpy().astype('int32')),
                    tuple(bounding[0:2].numpy().astype('int32')),
                    cv2.FONT_HERSHEY_PLAIN, 1, clr, 2)
            image = tf.expand_dims(image, axis=0)
            # write log
            with log.as_default():
                tf.summary.scalar('train loss',
                                  train_loss.result(),
                                  step=optimizer.iterations)
                tf.summary.scalar('test loss',
                                  test_loss.result(),
                                  step=optimizer.iterations)
                tf.summary.image('detect',
                                 image[..., ::-1],
                                 step=optimizer.iterations)
            print('Step #%d Train Loss: %.6f Test Loss: %.6f' %
                  (optimizer.iterations, train_loss.result(),
                   test_loss.result()))
            # break condition
            #if train_loss.result() < 0.001: break;
            # reset
            train_loss.reset_states()
            test_loss.reset_states()
    # note: only reached if the commented-out break condition above is re-enabled
    yolov5l.save('yolov5l.h5')
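The two NaN guards in this loop can be factored into one helper. A sketch, assuming all gradients are dense tensors (IndexedSlices from embedding layers would need their values extracted first):

def has_nan(tensors):
    # True if any element of any tensor in the list is NaN
    return any(bool(tf.math.reduce_any(tf.math.is_nan(t))) for t in tensors)

# usage inside the loop:
#   if has_nan([loss]): continue
#   if has_nan(grads): continue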
Example #5
def main():
    # models
    cycleGAN = CycleGAN()

    # all four optimizers share the same piecewise-constant schedule, so build
    # them with one helper instead of repeating the schedule four times
    def make_optimizer():
        schedule = tf.keras.optimizers.schedules.PiecewiseConstantDecay(
            boundaries=[
                dataset_size * 100 + i * dataset_size * 100 / 4
                for i in range(5)
            ],
            values=list(reversed([i * 2e-4 / 5 for i in range(6)])))
        return tf.keras.optimizers.Adam(schedule, beta_1=0.5)

    optimizerGA = make_optimizer()
    optimizerGB = make_optimizer()
    optimizerDA = make_optimizer()
    optimizerDB = make_optimizer()

    # load dataset
    # (alternative: load from local TFRecord files instead of tensorflow_datasets)
    # A = tf.data.TFRecordDataset(os.path.join('dataset', 'A.tfrecord')).map(parse_function_generator(img_shape)).shuffle(batch_size).batch(batch_size).__iter__()
    # B = tf.data.TFRecordDataset(os.path.join('dataset', 'B.tfrecord')).map(parse_function_generator(img_shape)).shuffle(batch_size).batch(batch_size).__iter__()
    A = iter(
        tfds.load(name='cycle_gan/horse2zebra', split="trainA",
                  download=False).repeat(-1).map(
                      parse_function_generator()).shuffle(batch_size).batch(
                          batch_size).prefetch(tf.data.experimental.AUTOTUNE))
    B = iter(
        tfds.load(name='cycle_gan/horse2zebra', split="trainB",
                  download=False).repeat(-1).map(
                      parse_function_generator()).shuffle(batch_size).batch(
                          batch_size).prefetch(tf.data.experimental.AUTOTUNE))
    testA = iter(
        tfds.load(name='cycle_gan/horse2zebra', split='testA',
                  download=False).repeat(-1).map(
                      parse_function_generator(isTrain=False)).batch(1))
    testB = iter(
        tfds.load(name='cycle_gan/horse2zebra', split='testB',
                  download=False).repeat(-1).map(
                      parse_function_generator(isTrain=False)).batch(1))
    # restore from existing checkpoint
    checkpoint = tf.train.Checkpoint(GA=cycleGAN.GA,
                                     GB=cycleGAN.GB,
                                     DA=cycleGAN.DA,
                                     DB=cycleGAN.DB,
                                     optimizerGA=optimizerGA,
                                     optimizerGB=optimizerGB,
                                     optimizerDA=optimizerDA,
                                     optimizerDB=optimizerDB)
    checkpoint.restore(tf.train.latest_checkpoint('checkpoints'))
    # create log
    log = tf.summary.create_file_writer('checkpoints')
    # train model
    avg_ga_loss = tf.keras.metrics.Mean(name='GA loss', dtype=tf.float32)
    avg_gb_loss = tf.keras.metrics.Mean(name='GB loss', dtype=tf.float32)
    avg_da_loss = tf.keras.metrics.Mean(name='DA loss', dtype=tf.float32)
    avg_db_loss = tf.keras.metrics.Mean(name='DB loss', dtype=tf.float32)
    while True:
        imageA, _ = next(A)
        imageB, _ = next(B)
        with tf.GradientTape(persistent=True) as tape:
            outputs = cycleGAN((imageA, imageB))
            GA_loss = cycleGAN.GA_loss(outputs)
            GB_loss = cycleGAN.GB_loss(outputs)
            DA_loss = cycleGAN.DA_loss(outputs)
            DB_loss = cycleGAN.DB_loss(outputs)
        # calculate discriminator gradients
        da_grads = tape.gradient(DA_loss, cycleGAN.DA.trainable_variables)
        avg_da_loss.update_state(DA_loss)
        db_grads = tape.gradient(DB_loss, cycleGAN.DB.trainable_variables)
        avg_db_loss.update_state(DB_loss)
        # calculate generator gradients
        ga_grads = tape.gradient(GA_loss, cycleGAN.GA.trainable_variables)
        avg_ga_loss.update_state(GA_loss)
        gb_grads = tape.gradient(GB_loss, cycleGAN.GB.trainable_variables)
        avg_gb_loss.update_state(GB_loss)
        # release the persistent tape once all gradients have been taken
        del tape
        # update discriminator weights
        optimizerDA.apply_gradients(
            zip(da_grads, cycleGAN.DA.trainable_variables))
        optimizerDB.apply_gradients(
            zip(db_grads, cycleGAN.DB.trainable_variables))
        # update generator weights
        optimizerGA.apply_gradients(
            zip(ga_grads, cycleGAN.GA.trainable_variables))
        optimizerGB.apply_gradients(
            zip(gb_grads, cycleGAN.GB.trainable_variables))
        if tf.equal(optimizerGA.iterations % 500, 0):
            imageA, _ = next(testA)
            imageB, _ = next(testB)
            outputs = cycleGAN((imageA, imageB))
            real_A = tf.cast(tf.clip_by_value((imageA + 1) * 127.5,
                                              clip_value_min=0.,
                                              clip_value_max=255.),
                             dtype=tf.uint8)
            real_B = tf.cast(tf.clip_by_value((imageB + 1) * 127.5,
                                              clip_value_min=0.,
                                              clip_value_max=255.),
                             dtype=tf.uint8)
            fake_B = tf.cast(tf.clip_by_value((outputs[1] + 1) * 127.5,
                                              clip_value_min=0.,
                                              clip_value_max=255.),
                             dtype=tf.uint8)
            fake_A = tf.cast(tf.clip_by_value((outputs[7] + 1) * 127.5,
                                              clip_value_min=0.,
                                              clip_value_max=255.),
                             dtype=tf.uint8)
            with log.as_default():
                tf.summary.scalar('generator A loss',
                                  avg_ga_loss.result(),
                                  step=optimizerGA.iterations)
                tf.summary.scalar('generator B loss',
                                  avg_gb_loss.result(),
                                  step=optimizerGB.iterations)
                tf.summary.scalar('discriminator A loss',
                                  avg_da_loss.result(),
                                  step=optimizerDA.iterations)
                tf.summary.scalar('discriminator B loss',
                                  avg_db_loss.result(),
                                  step=optimizerDB.iterations)
                tf.summary.image('real A', real_A, step=optimizerGA.iterations)
                tf.summary.image('fake B', fake_B, step=optimizerGA.iterations)
                tf.summary.image('real B', real_B, step=optimizerGA.iterations)
                tf.summary.image('fake A', fake_A, step=optimizerGA.iterations)
            print('Step #%d GA Loss: %.6f GB Loss: %.6f DA Loss: %.6f DB Loss: %.6f lr: %.6f' % \
                  (optimizerGA.iterations, avg_ga_loss.result(), avg_gb_loss.result(), avg_da_loss.result(), avg_db_loss.result(), \
                  optimizerGA.learning_rate(optimizerGA.iterations)))
            avg_ga_loss.reset_states()
            avg_gb_loss.reset_states()
            avg_da_loss.reset_states()
            avg_db_loss.reset_states()
        if tf.equal(optimizerGA.iterations % 10000, 0):
            # save model
            checkpoint.save(os.path.join('checkpoints', 'ckpt'))
        if GA_loss < 0.01 and GB_loss < 0.01 and DA_loss < 0.01 and DB_loss < 0.01:
            break
    # save the network structure with weights
    if not os.path.exists('models'): os.mkdir('models')
    cycleGAN.GA.save(os.path.join('models', 'GA.h5'))
    cycleGAN.GB.save(os.path.join('models', 'GB.h5'))
    cycleGAN.DA.save(os.path.join('models', 'DA.h5'))
    cycleGAN.DB.save(os.path.join('models', 'DB.h5'))
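The schedule built by make_optimizer is the standard CycleGAN recipe: hold 2e-4 for the first 100 epochs, then step it down toward zero in quarters over the next 100. A quick sanity check of the values it yields, taking the assumed dataset_size = 1000 (boundaries fall at 100000, 125000, ..., 200000 steps):

schedule = tf.keras.optimizers.schedules.PiecewiseConstantDecay(
    boundaries=[dataset_size * 100 + i * dataset_size * 100 / 4 for i in range(5)],
    values=list(reversed([i * 2e-4 / 5 for i in range(6)])))
for step in [0., 110000., 135000., 160000., 185000., 210000.]:
    print(step, float(schedule(step)))
# prints 2e-04, 1.6e-04, 1.2e-04, 8e-05, 4e-05, 0.0 in turn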
Example #6
def main():
  # allow GPU memory to grow on demand
  gpus = tf.config.experimental.list_physical_devices('GPU')
  for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)
  # yolov3 model
  yolov3 = YOLOv3((416, 416, 3), 80)
  yolov3_loss = Loss((416, 416, 3), 80)
  #optimizer = tf.keras.optimizers.Adam(tf.keras.optimizers.schedules.ExponentialDecay(1e-5, decay_steps = 110000, decay_rate = 0.99))
  optimizer = tf.keras.optimizers.Adam(1e-5)
  checkpoint = tf.train.Checkpoint(model=yolov3, optimizer=optimizer)
  train_loss = tf.keras.metrics.Mean(name='train loss', dtype=tf.float32)
  test_loss = tf.keras.metrics.Mean(name='test loss', dtype=tf.float32)
  # load downloaded dataset
  trainset_filenames = [join('trainset', filename) for filename in listdir('trainset')]
  testset_filenames = [join('testset', filename) for filename in listdir('testset')]
  trainset = tf.data.TFRecordDataset(trainset_filenames).map(parse_function_generator(80)).repeat(-1).shuffle(batch_size).batch(batch_size).prefetch(tf.data.experimental.AUTOTUNE)
  testset = tf.data.TFRecordDataset(testset_filenames).map(parse_function_generator(80)).repeat(-1).shuffle(batch_size).batch(batch_size).prefetch(tf.data.experimental.AUTOTUNE)
  validationset = tf.data.TFRecordDataset(testset_filenames).map(parse_function).repeat(-1)
  trainset_iter = iter(trainset)
  testset_iter = iter(testset)
  validationset_iter = iter(validationset)
  # restore from existing checkpoint
  if not exists('checkpoints'): mkdir('checkpoints')
  checkpoint.restore(tf.train.latest_checkpoint('checkpoints'))
  # tensorboard summary
  log = tf.summary.create_file_writer('checkpoints')
  # train model
  while True:
    images, labels = next(trainset_iter)
    with tf.GradientTape() as tape:
      outputs = yolov3(images)
      loss = yolov3_loss([*outputs, *labels])
    # check that the loss is numerically valid
    if tf.math.reduce_any(tf.math.is_nan(loss)):
      print("NaN was detected in loss, skip the following steps!")
      continue
    grads = tape.gradient(loss, yolov3.trainable_variables)
    # check that the gradients are numerically valid
    if tf.math.reduce_any([tf.math.reduce_any(tf.math.is_nan(grad)) for grad in grads]):
      print("NaN was detected in gradients, skip gradient apply!")
      continue
    optimizer.apply_gradients(zip(grads, yolov3.trainable_variables))
    train_loss.update_state(loss)
    # save model
    if tf.equal(optimizer.iterations % 10000, 0):
      # save checkpoint every 10000 steps
      checkpoint.save(join('checkpoints', 'ckpt'))
      yolov3.save('yolov3.h5')
    if tf.equal(optimizer.iterations % 100, 0):
      # evaluate
      for i in range(10):
        images, labels = next(testset_iter)  # images.shape = (b, h, w, 3)
        outputs = yolov3(images)
        loss = yolov3_loss([*outputs, *labels])
        test_loss.update_state(loss)
      # visualize
      image, bbox, labels = next(validationset_iter)  # image.shape = (h, w, 3)
      image = image.numpy().astype('uint8')
      predictor = Predictor(yolov3=yolov3)
      boundings = predictor.predict(image)
      color_map = dict()
      for bounding in boundings:
        cls = bounding[5].numpy().astype('int32')
        if cls not in color_map:
          color_map[cls] = tuple(np.random.randint(low=0, high=256, size=(3,)).tolist())
        clr = color_map[cls]
        cv2.rectangle(image, tuple(bounding[0:2].numpy().astype('int32')), tuple(bounding[2:4].numpy().astype('int32')), clr, 5)
      image = tf.expand_dims(image, axis=0)
      # write log
      with log.as_default():
        tf.summary.scalar('train loss', train_loss.result(), step=optimizer.iterations)
        tf.summary.scalar('test loss', test_loss.result(), step=optimizer.iterations)
        tf.summary.image('detect', image[..., ::-1], step=optimizer.iterations)
      print('Step #%d Train Loss: %.6f Test Loss: %.6f' % (optimizer.iterations, train_loss.result(), test_loss.result()))
      # break condition
      #if train_loss.result() < 0.001: break
      # reset
      train_loss.reset_states()
      test_loss.reset_states()
  # note: only reached if the commented-out break condition above is re-enabled
  yolov3.save('yolov3.h5')