Example #1
    def _initEngine(self) -> None:
        # assert tf.__version__ >= '2.0.0'
        tf.compat.v1.enable_eager_execution()

        print(f"TensorFlow: is_gpu_available: {tf.test.is_gpu_available()}")
        # cuda_only=False, min_cuda_compute_capability=None

        data = StyletransferData()
        data.prepare()

        content_layers = ['block5_conv2']

        style_layers = [
            'block1_conv1', 'block2_conv1', 'block3_conv1', 'block4_conv1',
            'block5_conv1'
        ]

        tool = StyletransferTool(style_layers=style_layers,
                                 content_layers=content_layers)
        tool.content = imread(data._content['venice'])
        tool.style = imread(data._styles['starry_night'])
        tool.reset()

        self.setStyletransfer(tool)
Example #2
    def test_tensorflow_network_(self):
        self.assertTrue(os.path.isfile(self.checkpoint + '.index'))

        network = TensorFlowNetwork(checkpoint=self.checkpoint)
        network.prepare()
        self.assertTrue(network.prepared)

        # print(network._sess.graph.get_operations())
        # network.summary()

        images = []
        for arg in ('images/elephant.jpg',):
            im = (imread(arg)[:, :, :3]).astype(np.float32)
            im = imresize(im, (227, 227))
            im = im - im.mean()
            # swap the first and last color channel (RGB -> BGR); a tuple swap
            # on numpy slices would not work, since the second assignment
            # would already see the overwritten channel
            im = im[:, :, ::-1]
            images.append(im)

        self.assertEqual(len(images), 1)
        self.assertTrue(isinstance(images[0], np.ndarray))

        # Assuming the first op is the input.
        network_input_tensor = \
            network._session.graph.get_operations()[0].outputs[0]
        network_output_tensor = network['dense_3'].activation_tensor

        in_op = None
        out_op = None
        for op in network._session.graph.get_operations():
            # print(op.type)
            if op.type == 'Placeholder':
                in_op = op
                print("Heureka: in!")
            if op.type == 'Softmax':
                out_op = op
                print("Heureka: out!")
                break

        if out_op:
            feed_dict = {network_input_tensor: images}
            output = network._session.run(network_output_tensor,
                                          feed_dict=feed_dict)

            # only report results if an output operation was found;
            # otherwise 'output' would be undefined
            for input_im_ind in range(output.shape[0]):
                inds = np.argsort(output)[input_im_ind, :]
                print("Image", input_im_ind)
Example #3
def main():
    """The main program.
    """

    parser = \
        argparse.ArgumentParser(description="Activation extraction from "
                                "layers of a neural network")
    parser.add_argument('--gui',
                        action='store_true',
                        help='display activations in graphical user interface')
    parser.add_argument('--iterate',
                        action='store_true',
                        help='iterate over activation values')
    parser.add_argument('--top',
                        type=int,
                        help='obtain top n activation values')
    parser.add_argument('--store',
                        action='store_true',
                        help='store activation values')
    parser.add_argument('--archive',
                        action='store_true',
                        help='use activation values from archive')
    parser.add_argument('--store-top',
                        action='store_true',
                        help='store top activation values')
    parser.add_argument('image',
                        metavar='IMAGE',
                        nargs='*',
                        help='input image(s)')

    ToolboxArgparse.add_arguments(parser)
    NetworkArgparse.prepare(parser, layers=True)
    DatasourceArgparse.prepare(parser)

    args = parser.parse_args()
    ToolboxArgparse.process_arguments(parser, args)

    network, layers = NetworkArgparse.network(parser, args, layers=True)
    network.summary(layers=layers)

    datasource = DatasourceArgparse.datasource(parser, args)

    # FIXME[hack]: GUI display
    global show_activations
    if args.gui:
        global app, window, activationview
        app = QApplication([])

        window = QMainWindow()
        activationview = QActivationView()
        show_activations = gui_show_activations
    else:
        show_activations = console_show_activations

    if args.iterate:
        demo_iterate_activations(network, datasource)
    elif args.store:
        demo_store_activations(network, datasource)
    elif args.archive:
        demo_load_activations(network, datasource)
    elif args.top:
        demo_top_activations(network, datasource)
    elif args.store_top:
        demo_store_top_activations(network, datasource)

    elif datasource is not None:
        #
        # loop over the dataset
        #

        # FIXME[bug]: error in ActivationWorker
        extract_activations1(network, datasource, layers=layers)

    else:
        # image_file = 'images/elephant.jpg'
        for image_file in args.image:
            image = imread(image_file)
            # FIXME[bug]: error in ActivationWorker
            demo_image_activations1(network, image)
            demo_image_activations2(network, image)
            demo_image_activations3(network, image)
            demo_image_activations4(network, image)
            demo_image_activations5(network, image)
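
The console_show_activations function assigned above is not shown in this example. Below is a hypothetical sketch of what such a console display function might look like, assuming activations are passed as a mapping from layer name to a numpy array; the actual implementation may differ.

import numpy as np

def console_show_activations(activations: dict) -> None:
    """Print a short per-layer summary of activation values."""
    for layer_name, activation in activations.items():
        activation = np.asarray(activation)
        print(f"{layer_name}: shape={activation.shape}, "
              f"min={activation.min():.3f}, max={activation.max():.3f}, "
              f"mean={activation.mean():.3f}")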
Example #4
def main() -> None:

    parser = ArgumentParser(description='QtGUI demo program.')
    parser.add_argument('-i',
                        '--image',
                        action='append',
                        nargs='+',
                        help='an image to be used')
    parser.add_argument('-n', '--network', help='network to be used')
    parser.add_argument('-t',
                        '--toolbox',
                        action='store_true',
                        help='use a Toolbox object')
    parser.add_argument('-w',
                        '--window',
                        action='store_true',
                        help='create a QMainWindow')
    parser.add_argument('widget', nargs='+', help='widgets to display')

    print(sys.argv)
    args = parser.parse_args()
    print(args)
    # args, unknown_args = parser.parse_known_args(sys.argv)
    # print(args, unknown_args)

    images = ([image for sublist in args.image
               for image in sublist] if args.image else
              ['/space/data/ibug300/300W/01_Indoor/indoor_034.png']
              )  # FIXME[hack]

    widgets = [
        widget_names.get(name, name).rsplit('.', 1) for name in args.widget
    ]
    print(widgets)

    app = QApplication(sys.argv[:1])
    if args.window:
        window = QMainWindow()
        centralWidget = QWidget()
        layout = QVBoxLayout()
        centralWidget.setLayout(layout)
        window.setCentralWidget(centralWidget)
        window.show()

    for module_name, cls_name in widgets:
        module = importlib.import_module(module_name)
        Widget = getattr(module, cls_name)
        qwidget = Widget()
        if cls_name == 'QImageView':
            qwidget.setImage(imread(images[0]))
        elif cls_name == 'QNetworkComboBox':
            qwidget.setOnlyInitialized(False)
        elif cls_name == 'QDatasourceComboBox':
            qwidget.setOnlyInitialized(False)

        if args.window:
            layout.addWidget(qwidget)
        else:
            qwidget.show()

    app.exec_()
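
As a usage note: based on the arguments defined above, an invocation like "python qtgui_demo.py QImageView --window --image images/elephant.jpg" (the script name is assumed here) would create a QMainWindow and add a QImageView showing the given image, provided widget_names maps the short widget name to its full module path.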
Example #5
    def setUp(self):
        """Initialize a detector to be used in the tests.
        """
        self.alexnet = Network['alexnet-tf']
        self.image = imread('images/elephant.jpg')
Example #6
def main():
    parser = ArgumentParser(description='Face labeling tool')
    parser.add_argument('--directory',
                        type=str,
                        default=DIRECTORY,
                        help="path to the base directory "
                        "(containing clean* subdirectories)")
    parser.add_argument('--clean0',
                        type=str,
                        help="path to the clean0 directory (optional)")
    parser.add_argument('--clean2',
                        type=str,
                        help="path to the clean2 directory (optional)")
    parser.add_argument('--clean4',
                        type=str,
                        help="path to the clean4 directory "
                        "(including 'UnifiedFunneled2')")

    ToolboxArgparse.add_arguments(parser)

    args = parser.parse_args()
    ToolboxArgparse.process_arguments(parser, args)

    if args.clean0:
        directory_clean0 = args.clean0
    elif DIRECTORY_CLEAN0 is None:
        directory_clean0 = os.path.join(args.directory, 'clean0')
    else:
        directory_clean0 = DIRECTORY_CLEAN0

    if args.clean2:
        directory_clean2 = args.clean2
    elif DIRECTORY_CLEAN2 is None:
        directory_clean2 = os.path.join(args.directory, 'clean2')
    else:
        directory_clean2 = DIRECTORY_CLEAN2

    if args.clean4:
        directory_clean4 = args.clean4
    elif DIRECTORY_CLEAN4 is None:
        directory_clean4 = os.path.join(args.directory, 'clean4')
    else:
        directory_clean4 = DIRECTORY_CLEAN4
    directory_funneled = os.path.join(directory_clean4, 'UnifiedFunneled2')

    #
    # Some sanity checks
    #
    if not os.path.isdir(directory_clean4):
        logging.warning(
            "Clean4 directory '%s' does not exist "
            "- no data to label.", directory_clean4)
        sys.exit(1)

    if not os.path.isdir(directory_clean0):
        logging.warning(
            "Clean0 directory '%s' does not exist "
            "- some images may not be available.", directory_clean0)

    try:
        # RIFF (little-endian) data, Web/P image, VP8 encoding, 230x230
        problematic_image = os.path.join(directory_clean4, 'UnifiedFunneled',
                                         'AaronWolff', 'New', '1091847.jpg')
        import imageio
        from dltb.util.image import imread
        image = imread(problematic_image, module='imageio')
    except ValueError as ex:
        # imageio: ValueError: Could not find a format to read the
        # specified file in single-image mode
        logging.error("Problematic image file: '%s' (imageio version %s)",
                      problematic_image, imageio.__version__)
        # error: imageio 2.9.0 [conda: pyhd3eb1b0_0 default] (Ubuntu 20.04)
        # error: imageio 2.9.0 [conda: py_0 conda-forge] (Ubuntu 20.04)
        # ok:    imageio 2.6.1 [conda: py36_0 default] (Ubuntu 16.04)
        # print(ex, file=sys.stderr)
        #sys.exit(1)

    #
    # open the data set
    #

    try:
        datasource = ChildFaces(directory=directory_funneled, prepare=True)
    except Exception as ex:
        print(ex, file=sys.stderr)
        sys.exit(1)
    print(datasource)

    data = datasource[0]
    print(data)

    datasource.load_metadata(data)

    # FIXME[test]
    data.add_attribute('valid', True)
    datasource.write_metadata(data)

    labels = list(datasource.labels())
    label = labels[0]

    faces = [
        datasource.get_data(filename=filename)
        for filename in datasource.faces(label)
    ]
    print(faces)

    #
    # run the graphical user interface
    #
    app = QApplication([])
    screensize = QApplication.desktop().screenGeometry()

    face_labeler = QFaceLabeler(datasource)
    #image_view = face_labeler.multiImageView
    #image_view.setImages(faces)

    # This will run the graphical interface
    gui = face_labeler
    gui.setMaximumSize(screensize.width() - 300, screensize.height() - 300)

    gui.show()
    rc = app.exec()

    logging.info(f"Main: exiting gracefully (rc=%d).", rc)
    sys.exit(rc)
Example #7
    def setUp(self) -> None:
        self.network = Network['alexnet-tf']
        self.image = imread('images/elephant.jpg')
Example #8
    def test_imread(self) -> None:
        image = imread(self.image_file)
        self.assertEqual(image.shape, (450, 300, 3))
        self.assertEqual(image.dtype, np.uint8)
        self.assertEqual(tuple(image.mean(axis=(0, 1)).astype(np.uint8)),
                         (159, 153, 143))