Example #1
 def test_location_3d(self):
     with custom_object_scope({'Location3D': layers.Location3D}):
         testing_utils.layer_test(layers.Location3D,
                                  kwargs={
                                      'in_shape': (11, 12, 10, 4),
                                      'data_format': 'channels_last'
                                  },
                                  input_shape=(3, 11, 12, 10, 4))
         testing_utils.layer_test(layers.Location3D,
                                  kwargs={
                                      'in_shape': (4, 11, 12, 10),
                                      'data_format': 'channels_first'
                                  },
                                  input_shape=(3, 4, 11, 12, 10))
Example #2
 def test_location_2d(self):
     with custom_object_scope({'Location2D': layers.Location2D}):
         testing_utils.layer_test(layers.Location2D,
                                  kwargs={
                                      'in_shape': (5, 6, 4),
                                      'data_format': 'channels_last'
                                  },
                                  input_shape=(3, 5, 6, 4))
         testing_utils.layer_test(layers.Location2D,
                                  kwargs={
                                      'in_shape': (4, 5, 6),
                                      'data_format': 'channels_first'
                                  },
                                  input_shape=(3, 4, 5, 6))
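The Location2D/Location3D tests above wrap layer_test in custom_object_scope because layer_test round-trips the layer through get_config()/from_config() and model serialization, which requires the class name to be resolvable while the scope is active. Below is a minimal, self-contained sketch of that round trip using a hypothetical Scale layer that is not part of the original test suite; it assumes only the public tf.keras API.

from tensorflow import keras

class Scale(keras.layers.Layer):
    """Hypothetical custom layer: multiplies its input by a constant factor."""
    def __init__(self, factor=2.0, **kwargs):
        super().__init__(**kwargs)
        self.factor = factor

    def call(self, inputs):
        return inputs * self.factor

    def get_config(self):
        config = super().get_config()
        config.update({'factor': self.factor})
        return config

# Serialize the layer, then rebuild it by class name inside the scope;
# outside the scope, deserialization fails with an unknown-layer error.
config = Scale(factor=3.0).get_config()
with keras.utils.custom_object_scope({'Scale': Scale}):
    restored = keras.layers.deserialize({'class_name': 'Scale', 'config': config})
print(restored.factor)  # 3.0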
Example #3
def quant_model(float_model, quant_model, batchsize, predict, pred_dir):
    '''
    Quantize the floating-point model
    Save to HDF5 file
    '''

    # make folder for saving quantized model
    head_tail = os.path.split(quant_model)
    os.makedirs(head_tail[0], exist_ok=True)

    # make dataset and image processing pipeline
    _, x_test, _, x_test_noisy = mnist_download()
    calib_dataset = input_fn((x_test_noisy, x_test), batchsize, False)

    with custom_object_scope({'Sampling': Sampling}):
        # load trained floating-point model
        float_model = load_model(float_model, compile=False,
                                 custom_objects={'Sampling': Sampling})

        # quantizer
        quantizer = vitis_quantize.VitisQuantizer(float_model)
        quantized_model = quantizer.quantize_model(calib_dataset=calib_dataset)

    # save quantized model
    quantized_model.save(quant_model)
    print('Saved quantized model to', quant_model)


    '''
    Predictions
    '''
    if predict:
        print('\n' + DIVIDER)
        print('Predicting with quantized model...')
        print(DIVIDER + '\n')

        # remake predictions folder
        shutil.rmtree(pred_dir, ignore_errors=True)
        os.makedirs(pred_dir)

        predict_dataset = input_fn((x_test_noisy), batchsize, False)
        predictions = quantized_model.predict(predict_dataset, verbose=0)

        # scale pixel values back up to range 0:255 then save as PNG
        for i in range(20):
            cv2.imwrite(pred_dir + '/pred_' + str(i) + '.png',
                        predictions[i] * 255.0)
        print('Predictions saved as images in ./' + pred_dir)


    return
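A hypothetical invocation of quant_model; the file paths, batch size, and prediction folder below are illustrative assumptions, not values taken from the original project. Note that the function both enters custom_object_scope and passes custom_objects to load_model; either mechanism on its own is enough for Keras to resolve the Sampling layer during deserialization.

quant_model(float_model='float_model/f_model.h5',
            quant_model='quant_model/q_model.h5',
            batchsize=100,
            predict=True,
            pred_dir='quant_predictions')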
Example #4
 def test_anchors_2d(self):
     with custom_object_scope({'Anchors': layers.Anchors}):
         testing_utils.layer_test(
             layers.Anchors,
             kwargs={'size': 1, 'stride': 1,
                     'data_format': 'channels_last'},
             input_shape=(3, 5, 6, 4))
         testing_utils.layer_test(
             layers.Anchors,
             kwargs={'size': 1, 'stride': 1,
                     'data_format': 'channels_last'},
             input_shape=(3, None, None, None))
         testing_utils.layer_test(
             layers.Anchors,
             kwargs={'size': 1, 'stride': 1,
                     'data_format': 'channels_first'},
             input_shape=(3, 5, 6, 4))
Example #5
    def test_unmerge(self):
        track_length = 5
        max_cells = 10
        embedding_dim = 64

        custom_objects = {'Unmerge': layers.Unmerge}
        with self.cached_session():
            with custom_object_scope(custom_objects):
                testing_utils.layer_test(layers.Unmerge,
                                         kwargs={
                                             'track_length': track_length,
                                             'max_cells': max_cells,
                                             'embedding_dim': embedding_dim
                                         },
                                         input_shape=(None,
                                                      track_length * max_cells,
                                                      embedding_dim))
Example #6
 def test_dilated_max_pool_3d(self, strides, dilation_rate, padding):
     custom_objects = {'DilatedMaxPool3D': layers.DilatedMaxPool3D}
     pool_size = (3, 3, 3)
     with self.cached_session():
         with custom_object_scope(custom_objects):
             testing_utils.layer_test(
                 layers.DilatedMaxPool3D,
                 kwargs={'strides': strides,
                         'padding': padding,
                         'dilation_rate': dilation_rate,
                         'pool_size': pool_size},
                 input_shape=(3, 11, 12, 10, 4))
             testing_utils.layer_test(
                 layers.DilatedMaxPool3D,
                 kwargs={'strides': strides,
                         'padding': padding,
                         'dilation_rate': dilation_rate,
                         'data_format': 'channels_first',
                         'pool_size': pool_size},
                 input_shape=(3, 4, 11, 12, 10))
Example #7
 def test_dilated_max_pool_2d(self, strides, dilation_rate, padding):
     pool_size = (3, 3)
     custom_objects = {'DilatedMaxPool2D': layers.DilatedMaxPool2D}
     with self.cached_session():
         with custom_object_scope(custom_objects):
             testing_utils.layer_test(
                 layers.DilatedMaxPool2D,
                 kwargs={'strides': strides,
                         'pool_size': pool_size,
                         'padding': padding,
                         'dilation_rate': dilation_rate,
                         'data_format': 'channels_last'},
                 input_shape=(3, 5, 6, 4))
             testing_utils.layer_test(
                 layers.DilatedMaxPool2D,
                 kwargs={'strides': strides,
                         'pool_size': pool_size,
                         'padding': padding,
                         'dilation_rate': dilation_rate,
                         'data_format': 'channels_first'},
                 input_shape=(3, 4, 5, 6))
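The two DilatedMaxPool tests above take strides, dilation_rate, and padding as extra arguments, which suggests they are driven by a parameterized-test decorator in the original suite (the decorator itself is not shown in these snippets). Below is a minimal sketch of that pattern using absl's parameterized module; the concrete parameter combinations are assumptions for illustration only.

from absl.testing import parameterized

class DilatedMaxPoolParamsTest(parameterized.TestCase):

    # Each tuple is unpacked into the test method's extra arguments.
    @parameterized.parameters(
        (1, 1, 'valid'),
        (1, 2, 'same'),
        (2, 1, 'same'),
    )
    def test_dilated_max_pool_2d(self, strides, dilation_rate, padding):
        self.assertIn(padding, ('valid', 'same'))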
Example #8
    def test_simple(self):
        with custom_object_scope(
            {'LocalizedAttentionLayer2D': LocalizedAttentionLayer2D}):
            in_shape = [4, 4]
            bs = 1
            dim = 4
            v_dim = dim * 2
            s = 2

            layer = LocalizedAttentionLayer2D(patch_size=(3, 3),
                                              strides=(s, s),
                                              num_heads=2,
                                              dilations=(1, 1))

            q = to_tensor(
                normal(size=(bs, in_shape[0] // s, in_shape[1] // s,
                             dim)).astype(np.float32))
            k = to_tensor(
                normal(size=(bs, in_shape[0], in_shape[1],
                             dim)).astype(np.float32))
            v = to_tensor(
                normal(size=(bs, in_shape[0], in_shape[1],
                             v_dim)).astype(np.float32))

            @tf.function
            def test_func(_q, _k, _v):
                return layer(_q, _k, _v)

            r = test_func(q, _k=k, _v=v)

            ex_res_shape = np.zeros(
                (bs, in_shape[0] // s, in_shape[1] // s, v_dim))

            self.assertShapeEqual(ex_res_shape, r)

            _test_grads(self, test_func, [q, k, v])
Example #9
 def test_simple(self):
     with custom_object_scope({'TimeDelayLayer1D': TimeDelayLayer1D}):
         layer_test(TimeDelayLayer1D,
                    kwargs={'output_dim': 4},
                    input_shape=(5, 32, 3))
Example #10
def train(float_model, predict, pred_dir, tblogs_dir, batchsize, learnrate,
          epochs):
    '''
    Variational encoder model
    '''

    image_dim = 28
    image_chan = 1
    input_layer = Input(shape=(image_dim, image_dim, image_chan))
    encoder_mu, encoder_log_variance, encoder_z = encoder.call(input_layer)

    dec_out = decoder.call(encoder_z)
    model = Model(inputs=input_layer, outputs=dec_out)
    '''
    Prepare MNIST dataset
    '''
    x_train, x_test, x_train_noisy, x_test_noisy = mnist_download()
    train_dataset = input_fn((x_train_noisy, x_train), batchsize, True)
    test_dataset = input_fn((x_test_noisy, x_test), batchsize, False)
    predict_dataset = input_fn((x_test_noisy), batchsize, False)
    '''
    Callbacks
    '''
    tb_call = TensorBoard(log_dir=tblogs_dir)
    chkpt_call = ModelCheckpoint(filepath=float_model,
                                 monitor='val_mse',
                                 mode='min',
                                 verbose=1,
                                 save_weights_only=False,
                                 save_best_only=True)

    callbacks_list = [tb_call, chkpt_call]
    '''
    Compile
    '''
    model.compile(optimizer=Adam(lr=learnrate),
                  loss=lambda y_true, y_predict: loss_func(
                      y_true, y_predict, encoder_mu, encoder_log_variance),
                  metrics=['mse'])
    '''
    Training
    '''
    print(_DIVIDER)
    print('Training...')
    print(_DIVIDER)
    # make folder for saving trained model checkpoint
    os.makedirs(os.path.dirname(float_model), exist_ok=True)

    # remake Tensorboard logs folder
    shutil.rmtree(tblogs_dir, ignore_errors=True)
    os.makedirs(tblogs_dir)

    train_history = model.fit(train_dataset,
                              epochs=epochs,
                              steps_per_epoch=len(x_train) // batchsize,
                              validation_data=test_dataset,
                              callbacks=callbacks_list,
                              verbose=1)
    '''
    Predictions
    '''
    if (predict):
        print(_DIVIDER)
        print('Making predictions...')
        print(_DIVIDER)
        # remake predictions folder
        shutil.rmtree(pred_dir, ignore_errors=True)
        os.makedirs(pred_dir)

        with custom_object_scope({'Sampling': Sampling}):
            model = load_model(float_model,
                               compile=False,
                               custom_objects={'Sampling': Sampling})
        model.compile(loss=lambda y_true, y_predict: loss_func(
            y_true, y_predict, encoder_mu, encoder_log_variance))
        predictions = model.predict(predict_dataset, verbose=1)

        # scale pixel values back up to range 0:255 then save as PNG
        for i in range(20):
            cv2.imwrite(pred_dir + '/pred_' + str(i) + '.png',
                        predictions[i] * 255.0)
            cv2.imwrite(pred_dir + '/input_' + str(i) + '.png',
                        x_test_noisy[i] * 255.0)
        print('Inputs and Predictions saved as images in ./' + pred_dir)

    print(
        "\nTensorBoard can be opened with the command: tensorboard --logdir=./tb_logs --host localhost --port 6006"
    )

    return
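A sketch of how train might be invoked; every argument value below is an illustrative assumption rather than a setting from the original project. Because the checkpoint callback monitors val_mse with mode='min' and save_best_only=True, the file at float_model holds the weights from the best epoch rather than the last one.

train(float_model='float_model/f_model.h5',
      predict=True,
      pred_dir='predictions',
      tblogs_dir='tb_logs',
      batchsize=100,
      learnrate=0.001,
      epochs=30)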