Exemplo n.º 1
0
    def load_cube(self, image, padding_strategy, padded_size=None):
        """Extract and cache a cube of voxels around this ROI.

        Parameters:
            image: source image handed to ``ImageHelper.get_roi``.
            padding_strategy: 'neighbour' crops an enlarged region so the
                padding consists of real neighbouring voxels (useful before
                rotation); None/'zero'/'repeat' crop the exact ROI and pad
                it with ``ImageHelper.pad_image``.
            padded_size: target size; defaults to ``self.roi_size``.

        Returns:
            The extracted cube as a float32 ndarray (also stored in
            ``self.cube``).

        Raises:
            ValueError: for an unrecognized ``padding_strategy``.
        """
        output_array = None
        if padded_size is None:
            padded_size = self.roi_size

        self.padded_size = padded_size

        # Diagonal of the padded box: a cube this wide still contains the
        # ROI under any rotation.
        diagonal_length = np.sqrt(np.sum(np.power(padded_size, 2)))

        diagonal_size = [diagonal_length, diagonal_length, diagonal_length]
        # Keep (cube - roi) even per axis so the ROI can be centered exactly.
        # (Removed a dead `pass` statement that followed the increment.)
        for idx, i in enumerate(self.roi_size):
            if (diagonal_size[idx] - i) % 2 != 0:
                diagonal_size[idx] += 1

        diagonal_size = np.array(diagonal_size)

        if padding_strategy == 'neighbour':
            # Crop the enlarged (diagonal-sized) region straight from the
            # image; surrounding voxels act as the "padding".
            max_sz = diagonal_size
            max_start = np.int16(np.floor(self.roi_center - max_sz / 2))
            output_array = np.float32(ImageHelper.get_roi(image, max_start.tolist(), np.int16(max_sz).tolist()))
        elif padding_strategy in [None, 'zero', 'repeat']:
            # Crop the exact ROI, then pad it out to the diagonal size.
            output_array = np.float32(ImageHelper.get_roi(image, self.roi_start_idx.tolist(), self.roi_idx_size.tolist()))
            output_array = ImageHelper.pad_image(output_array, diagonal_size, mode=padding_strategy)
        else:
            raise ValueError("Unrecognized padding mode {0}".format(padding_strategy))

        self.cube = output_array
        return self.cube
Exemplo n.º 2
0
    def infer(self, sess):
        """Restore the latest checkpoint and write one batch of generated images."""
        # Load the model weights when a valid checkpoint exists.
        checkpoint_state = tf.train.get_checkpoint_state(self.checkpoint_dir)
        if checkpoint_state and tf.train.checkpoint_exists(checkpoint_state.model_checkpoint_path):
            self.saver.restore(sess, checkpoint_state.model_checkpoint_path)

        helper = ImageHelper()

        # One batch of standard-normal noise drives the generator.
        noise_batch = np.random.normal(size=[self.batch_size, self.noise_dim])
        generated = sess.run(self.fake_imgs, feed_dict={self.rand_noises: noise_batch})

        img_name = "{}/infer-image".format(self.hparams.sample_dir)
        helper.save_imgs(generated, img_name=img_name)

        tf.logging.info("====== generate images in file: {} ======".format(img_name))
Exemplo n.º 3
0
    def __init__(self, image_filename, model_object, input_cube_size=None):
        """Read *image_filename*, normalise the volume and set up worker state."""
        self.shutdown_signal = False
        self.worker_threads = []
        self.worker_thread_count = 1

        self.requires_padding = False
        self.model_cube_size = np.array([15, 15, 23])
        if input_cube_size is None:
            self.input_cube_size = self.model_cube_size
        else:
            self.input_cube_size = np.array(input_cube_size)

        # Smaller input cubes get padded up to the model size; a larger
        # cube on any axis is rejected outright.
        for axis in range(3):
            if self.input_cube_size[axis] != self.model_cube_size[axis]:
                self.requires_padding = True
            if self.input_cube_size[axis] > self.model_cube_size[axis]:
                raise ValueError(
                    "Input Cube size cannot be larger than model cube size " +
                    str(self.model_cube_size))

        self.q = None
        self.spacing_factor, self.image = ImageHelper.read_image(
            image_filename, correct_spacing=True)
        self.org_ndarray = sitk.GetArrayFromImage(self.image)

        # Swap SimpleITK (z, y, x) order to (x, y, z), then standardise
        # (zero mean, unit std) along axis 0.
        normalised = np.float32(np.swapaxes(self.org_ndarray, 0, 2))
        normalised -= np.mean(normalised, axis=0)
        normalised /= np.std(normalised, axis=0)

        self.ndarray = normalised
        self.model = model_object
Exemplo n.º 4
0
    def worker(self):
        """Continuously sample random cubes from the volume and queue results.

        Loops until ``self.shutdown_signal`` is set.  Each pass picks a random
        centre inside hard-coded fractional bounds of the image, slices an
        ``input_cube_size`` cube around it, pads it to the model's cube size,
        runs the model and puts ``[random_center, cube, result]`` on ``self.q``.
        """
        while not self.shutdown_signal:
            q = self.q

            cube_size = self.input_cube_size
            cube_half_size = cube_size / 2

            image_size = list(self.ndarray.shape)
            # Two disjoint x bands (15-45% and 55-80% of the width); one of
            # them is chosen at random below.  NOTE(review): presumably the
            # bands correspond to two anatomical regions -- confirm.
            x_range = [[
                floor(int(image_size[0] * .15) + cube_half_size[0]),
                floor(int(image_size[0] * .45) - cube_half_size[0])
            ],
                       [
                           floor(int(image_size[0] * .55) + cube_half_size[0]),
                           floor(int(image_size[0] * .80) - cube_half_size[0])
                       ]]
            y_range = [
                floor(int(image_size[1] * .25) + cube_half_size[1]),
                floor(int(image_size[1] * .75) - cube_half_size[1])
            ]
            z_range = [
                floor(int(image_size[2] * .3) + cube_half_size[2]),
                floor(int(image_size[2] * .6) - cube_half_size[2])
            ]

            # The half-size margins baked into the ranges above keep the
            # slice fully inside the volume.
            random_center = [
                random.randint(*x_range[random.randint(0, 1)]),
                random.randint(*y_range),
                random.randint(*z_range)
            ]
            cube = self.ndarray[
                floor(random_center[0] -
                      cube_half_size[0]):floor(random_center[0] +
                                               cube_half_size[0]),
                floor(random_center[1] -
                      cube_half_size[1]):floor(random_center[1] +
                                               cube_half_size[1]),
                floor(random_center[2] -
                      cube_half_size[2]):floor(random_center[2] +
                                               cube_half_size[2]), ]

            # Pad to the model's expected size, add a channel dimension,
            # run inference, and hand everything to the consumer queue.
            cube = ImageHelper.pad_image(cube, self.model_cube_size)
            cube = ImageHelper.add_dim(cube)
            result = self.model.test({'image': np.array([cube])})
            q.put([random_center, cube, result])
    def __init__(self, camera, output_path, app):
        """Wire up camera, overlay, UI, buttons, rotary encoder and printer.

        ``output_path`` is the directory prefix used for captured photos.
        """
        self.output_path = output_path
        self.last_file_path = None
        self.app = app
        self.camera = camera
        self.camera_overlay = CameraOverlay(self.camera)
        self.printer = Printer()
        # Green button = capture/confirm, red button = reject.
        self.green_btn = Button(GREEN_BTN_PIN)
        self.red_btn = Button(RED_BTN_PIN)
        self.green_btn.when_pressed = self.pressed_capture_button
        self.red_btn.when_pressed = self.pressed_reject_print_button
        self.ui = UI(self.camera_overlay, camera.picam.resolution)
        self.image_helper = ImageHelper(camera.picam.resolution)
        # Rotary encoder selects the number of print copies (bounded at 5).
        self.rotary = Rotary(ROTARY_CLK_PIN, ROTARY_DT_PIN, upperBound=5)

        self.camera.show_preview(True)
        self.ui.show_main_screen()

        # waiting_for_confirm: a captured photo is awaiting print approval.
        # busy: a capture sequence is currently in progress.
        self.waiting_for_confirm = False
        self.busy = False
Exemplo n.º 6
0
    def load_cubes_upscale(self):
        """Rescale every image's ROIs by 0.5, compute bounds, then load cubes.

        Returns a tuple ``(scaled_rois, self.roi_size_max)``.
        """
        cube_shape = None
        counter = 0

        # First pass: rescale all ROIs so the global bounds can be computed.
        new_rois = []
        for key in self.image_data:
            if not self.image_data[key]:
                continue
            path = os.path.abspath(self.input_path + key + '.img.gz')
            image, rois = ImageHelper.scale_rois(
                path,
                self.image_data[key],
                0.5,
                interpolation_strategy=self.interpolation_strategy)
            new_rois.extend(rois)

        self.calc_bounds(new_rois)

        # Second pass: rescale again and actually extract the cubes.
        scaled_rois = []
        for key in self.image_data:
            if not self.image_data[key]:
                continue

            sys.stdout.write("\rLoading Images {0} out of {1}...".format(
                counter, len(self.image_data)))
            sys.stdout.flush()
            counter += 1

            path = os.path.abspath(self.input_path + key + '.img.gz')
            image, rois = ImageHelper.scale_rois(
                path,
                self.image_data[key],
                0.5,
                interpolation_strategy=self.interpolation_strategy)
            cube_shape = self.load_image_cubes(image, rois, self.roi_size_max)
            scaled_rois.extend(rois)

        return scaled_rois, self.roi_size_max
Exemplo n.º 7
0
    def write_post(self, item):
        """Convert one WordPress export item into a Markdown post file.

        Rewrites the post's images via ``ImageHelper``, converts the
        WordPress markup through ``PostConverter``, and writes header plus
        content to ``<posts_dir>/<post_name>.md``.
        """
        filename = '{0}/{1}.md'.format(self.posts_dir, get(item, 'post_name'))

        img = ImageHelper(
            images_dir='static/images/{0}'.format(get(item, 'post_name')),
            images_url_root='/images/{0}'.format(get(item, 'post_name'))
        )

        self.check_posts_dir()

        title = get(item, 'title')
        published = datetime.strptime(get(item, 'post_date'), '%Y-%m-%d %H:%M:%S').isoformat()
        status = get(item, 'status')

        # Pipeline of markup conversions; each step returns the converter.
        content = PostConverter(get(item, 'content:encoded')) \
            .convert_wordpress_tags() \
            .convert_images_and_galleries() \
            .convert_html_elements() \
            .convert_youtube() \
            .clean_hyperlinks() \
            .convert_code_blocks() \
            .strip_excessive_newlines() \
            .to_string()

        # Cover image passed to write_header below.
        first_image = img.convert_image(img.find_first_image(content))

        # Rewrite every image URL in the body to its converted location.
        for url in img.find_all_images(content):
            image_url = img.convert_image(url)
            content = content.replace(url, image_url)

        print("\n\nCONVERTING: {0}\n\nFilename: {1}\nDate: {2}\nStatus: {3}".format(title, filename, published, status))

        # Fix: write with an explicit encoding so non-ASCII post content
        # does not depend on the platform's default locale encoding.
        with open(filename, 'w', encoding='utf-8') as f:
            self.write_header(f, first_image, published, title)
            f.write(content)
Exemplo n.º 8
0
    def get_image(self, id):
        """Return the (cached) image for *id* plus an ``is_last_run`` flag.

        The image is read from disk on first access and cached in
        ``self.images``; a per-id lock guards the cache fill.
        ``is_last_run`` is currently always False.
        """
        is_last_run = False

        # Fix: the original acquired/released the lock manually, so an
        # exception in read_image leaked the lock.  The context manager
        # releases it on every exit path.
        with self.image_locks[id]:
            if id not in self.images:
                image_path = os.path.abspath(self.input_path + id + '.img.gz')
                self.images[id] = ImageHelper.read_image(image_path)

        return self.images[id], is_last_run
Exemplo n.º 9
0
    def load_cubes_raw(self):
        """Load cubes for every image without spacing correction.

        Returns ``self.roi_size_max``.
        """
        cube_shape = None
        for counter, key in enumerate(self.image_data):
            # Progress indicator (carriage return keeps it on one line).
            sys.stdout.write("\rLoading Images {0} out of {1}...".format(
                counter, len(self.image_data)))
            sys.stdout.flush()

            path = os.path.abspath(self.input_path + key + '.img.gz')
            image = ImageHelper.read_image(path, correct_spacing=False)
            cube_shape = self.load_image_cubes(image, self.image_data[key],
                                               self.roi_size_max)
        return self.roi_size_max
Exemplo n.º 10
0
    def extract_cubes_one_image(self):
        """Debug helper: extract cubes for one hard-coded image and pickle it.

        Loads image '081832_20060531_1149725568.2203', keeps only the ROI
        whose start x-index is 101, assigns its class label, runs pairwise
        intersection checks, and dumps that single ROI to ``one_image.p``.
        """
        counter = 0
        rois = []
        self.read_rois()
        idx = None

        # Hard-coded image id for this one-off experiment.
        idx = '081832_20060531_1149725568.2203'
        roi_list = self.image_data[idx]

        spacing_factor = 1
        if idx not in self.images:
            image_path = os.path.abspath(self.input_path + idx + '.img.gz')

            # spacing_factor, self.images[idx] = ImageHelper.read_image(image_path, correct_spacing=True)
            spacing_factor = 1
            self.images[idx] = ImageHelper.read_image(image_path,
                                                      correct_spacing=False)

        the_one_roi = None
        for roi in roi_list:
            # Only the ROI starting at x == 101 is of interest here.
            if roi.roi_start_idx[0] != 101:
                continue
            the_one_roi = roi
            roi.rescale_depth(spacing_factor)
            cube = roi.load_cube(self.images[idx], padded_size=self.cube_size)
            assert (list(cube.shape) == self.cube_max_size)
            counter += 1
            print(counter)

        counter = 0
        roi_list = [the_one_roi]

        # Assign class labels and run the pairwise intersection check on
        # the (single-element) ROI list.
        for roi_1 in roi_list:
            counter += 1
            roi_1.lookup_table = self.class_lookup
            roi_1.y = self.class_lookup[roi_1.roi_class]
            for roi_2 in roi_list:
                roi_1.intersects(roi_2)
            print(counter)

        rois.extend(self.image_data[idx])

        pickle.dump([the_one_roi], open("one_image.p", "wb"))
Exemplo n.º 11
0
    def get_cube(self, rotation=None, padding=True, as_nd=True):
        """Return the ROI voxels from the cached ``self.cube``.

        Parameters:
            rotation: len-3 sequence of per-axis angles in degrees, or None.
                An all-zero rotation is treated the same as None (fast path).
            padding: if True, zero-pad the extracted array to
                ``self.padded_size``.
            as_nd: if True return a numpy array; otherwise a SimpleITK image
                (axes swapped back to SimpleITK order).
        """
        cube_shape = np.array(self.cube.shape)
        half_cube_shape = cube_shape/2
        half_roi_size = (self.roi_idx_size/2)
        # Index window that centres the ROI inside the cached cube.
        idx_start = np.int16(np.floor(half_cube_shape - half_roi_size))
        idx_end = np.int16(np.floor(half_cube_shape + half_roi_size))

        actual_size = self.roi_idx_size
        padded_size = self.padded_size

        # print(cube_shape)
        # print(padded_size)
        # print(actual_size)

        if rotation is not None:
            if sum(rotation) == 0:
                rotation = None

        ndarray = None
        if rotation is None:
            # No rotation, skip all these cpu intensive operations
            ndarray = self.cube[
                idx_start[0]:idx_end[0],
                idx_start[1]:idx_end[1],
                idx_start[2]:idx_end[2]
            ]
        else:

            # We have rotation: degrees -> radians.
            rotation = np.float32(rotation) * (math.pi/180)

            rotation_center = half_cube_shape

            rigid_euler = sitk.Euler3DTransform()
            # rigid_euler.SetRotation(float(rotation[0]), float(rotation[1]), float(rotation[2]))
            rigid_euler.SetRotation(float(rotation[0]), float(rotation[1]), float(rotation[2]))

            # NOTE(review): -0.5 presumably aligns voxel centres vs corners
            # -- confirm against SimpleITK conventions.
            rigid_euler.SetCenter(rotation_center - 0.5)
            # Resample in SimpleITK axis order, then swap back to (x, y, z).
            ndarray = np.swapaxes(
                        sitk.GetArrayFromImage(
                            sitk.Resample(
                                sitk.GetImageFromArray(np.swapaxes(self.cube, 0, 2)),
                                rigid_euler,
                                sitk.sitkLinear,
                                0,
                                sitk.sitkFloat32
                            )
                        ),
                        0, 2)
            # Re-centre the ROI window inside the resampled volume.
            new_center = (np.array(ndarray.shape) / 2)
            new_start = np.int16(new_center - half_roi_size)
            new_end = np.int16(new_center + half_roi_size)

            ndarray = np.float32(ndarray[
                new_start[0]:new_end[0],
                new_start[1]:new_end[1],
                new_start[2]:new_end[2]
            ])

        # Padding is done after rotation, maybe try to pad before rotation
        if padding:
            # print(ndarray.shape)
            # print(padded_size)
            ndarray = ImageHelper.pad_image(ndarray, padded_size, mode='zero')
            # print(ndarray.shape)
        return ndarray if as_nd else sitk.GetImageFromArray(np.swapaxes(ndarray, 0, 2))
Exemplo n.º 12
0
        self.img_helper.save_image(generated, epoch, "generated/wgan/",
                                   self.dataset)

    def predict_noise(self, size):
        """Generate *size* samples from the generator using N(0, 1) latent noise."""
        latent_batch = np.random.normal(0, 1, (size, self.latent_dim))
        return self.generator.predict(latent_batch)

    def plot_loss(self, history):
        """Plot every loss series recorded in *history* on one labelled figure."""
        frame = pd.DataFrame(history)
        plt.figure(figsize=(20, 5))
        for column in frame.columns:
            plt.plot(frame[column], label=column)
        plt.legend()
        plt.ylabel("loss")
        plt.xlabel("epochs")
        plt.show()


if __name__ == "__main__":
    from image_helper import ImageHelper

    # Rescale pixels from [0, 255] to [-1, 1] and add a channel axis.
    # NOTE(review): mnist, np and WGAN are assumed to be imported above
    # this chunk -- confirm in the full file.
    (X, _), (_, _) = mnist.load_data()
    X_train = X / 127.5 - 1.
    X_train = np.expand_dims(X_train, axis=3)
    wgan = WGAN(img_shape=X_train[0].shape,
                latent_dim=100,
                img_helper=ImageHelper())
    wgan.train(epochs=40000,
               train_data=X_train,
               batch_size=128,
               sample_interval=100)
Exemplo n.º 13
0
    # Fix the RNG seed for reproducibility and time the setup phase.
    np.random.seed(1)
    time_start_load_everything = time.time()
    parser = argparse.ArgumentParser(description='PPDL')
    parser.add_argument('--params', dest='params')
    args = parser.parse_args()
    with open(f'./{args.params}', 'r') as f:
        # NOTE(review): yaml.load without an explicit Loader is deprecated
        # and unsafe on untrusted files; prefer yaml.safe_load.
        params_loaded = yaml.load(f)
    current_time = datetime.datetime.now().strftime('%b.%d_%H.%M.%S')
    # Choose a data helper based on the configured dataset type.
    if params_loaded['type'] == config.TYPE_LOAN:
        helper = LoanHelper(current_time=current_time,
                            params=params_loaded,
                            name=params_loaded.get('name', 'loan'))
        helper.load_data(params_loaded)
    elif params_loaded['type'] == config.TYPE_CIFAR:
        helper = ImageHelper(current_time=current_time,
                             params=params_loaded,
                             name=params_loaded.get('name', 'cifar'))
        helper.load_data()
    elif params_loaded['type'] == config.TYPE_MNIST:
        helper = ImageHelper(current_time=current_time,
                             params=params_loaded,
                             name=params_loaded.get('name', 'mnist'))
        helper.load_data()
    elif params_loaded['type'] == config.TYPE_TINYIMAGENET:
        helper = ImageHelper(current_time=current_time,
                             params=params_loaded,
                             name=params_loaded.get('name', 'tiny'))
        helper.load_data()
    else:
        # Unknown dataset type; later helper.* accesses would fail.
        helper = None
    # NOTE(review): from here on the code re-parses arguments and rebuilds
    # the helper with a different datetime API (datetime.now vs
    # datetime.datetime.now) -- this looks like two separate snippets
    # concatenated together; verify against the original source.
    parser.add_argument('--name', dest='name', required=True)

    args = parser.parse_args()
    d = datetime.now().strftime('%b.%d_%H.%M.%S')
    # TensorBoard writer named after the run.
    writer = SummaryWriter(log_dir=f'runs/{args.name}')
    writer.add_custom_scalars(layout)

    with open(args.params) as f:
        params = yaml.load(f)
    if params.get('model', False) == 'word':
        helper = TextHelper(current_time=d, params=params, name='text')

        helper.corpus = torch.load(helper.params['corpus'])
        logger.info(helper.corpus.train.shape)
    else:
        helper = ImageHelper(current_time=d, params=params, name='utk')
    # Log to a file in the helper's folder and to the console.
    logger.addHandler(
        logging.FileHandler(filename=f'{helper.folder_path}/log.txt'))
    logger.addHandler(logging.StreamHandler())
    logger.setLevel(logging.DEBUG)
    logger.info(f'current path: {helper.folder_path}')
    # Pull training hyper-parameters out of the YAML config.
    batch_size = int(helper.params['batch_size'])
    num_microbatches = int(helper.params['num_microbatches'])
    lr = float(helper.params['lr'])
    momentum = float(helper.params['momentum'])
    decay = float(helper.params['decay'])
    epochs = int(helper.params['epochs'])
    S = float(helper.params['S'])
    z = float(helper.params['z'])
    # NOTE(review): presumably DP-SGD noise scale = multiplier z times
    # clipping bound S -- confirm against the training loop.
    sigma = z * S
    dp = helper.params['dp']
Exemplo n.º 15
0
    parser.add_argument('--name', dest='name', required=True)

    args = parser.parse_args()
    d = datetime.now().strftime('%b.%d_%H.%M.%S')
    # TensorBoard writer named after the run.
    writer = SummaryWriter(log_dir=f'runs/{args.name}')
    writer.add_custom_scalars(layout)

    with open(args.params) as f:
        # NOTE(review): yaml.load without an explicit Loader is deprecated
        # and unsafe on untrusted files; prefer yaml.safe_load.
        params = yaml.load(f)
    # Choose the helper: word/text model vs image model.
    if params.get('model', False) == 'word':
        helper = TextHelper(current_time=d, params=params, name='text')

        helper.corpus = torch.load(helper.params['corpus'])
        logger.info(helper.corpus.train.shape)
    else:
        helper = ImageHelper(current_time=d, params=params, name=args.name)
    # Log to a file in the helper's folder and to the console.
    logger.addHandler(
        logging.FileHandler(filename=f'{helper.folder_path}/log.txt'))
    logger.addHandler(logging.StreamHandler())
    logger.setLevel(logging.DEBUG)
    logger.info(f'current path: {helper.folder_path}')
    # Pull training hyper-parameters out of the YAML config.
    batch_size = int(helper.params['batch_size'])
    num_microbatches = int(helper.params['num_microbatches'])
    lr = float(helper.params['lr'])
    momentum = float(helper.params['momentum'])
    decay = float(helper.params['decay'])
    epochs = int(helper.params['epochs'])
    S = float(helper.params['S'])
    z = float(helper.params['z'])
    # NOTE(review): presumably DP-SGD noise scale = multiplier z times
    # clipping bound S -- confirm against the training loop.
    sigma = z * S
    dp = helper.params['dp']
Exemplo n.º 16
0
                            res_orb_dir = os.path.join(
                                PATH_TO_RESULT_DIR, filename + str(resolution),
                                "ORB")
                        res_orb_data_dir = os.path.join(
                            res_orb_dir, "data_" + str(j))

                        if not os.path.exists(res_orb_dir):
                            os.makedirs(res_orb_dir, exist_ok=True)
                        if not os.path.exists(res_orb_data_dir):
                            os.makedirs(res_orb_data_dir, exist_ok=True)

                        if resolution < 1:
                            # downsize resolution
                            print("downsizing resolution to " +
                                  str(resolution * 100) + " percent")
                            image_helper = ImageHelper()
                            image_helper.resize_directory(
                                os.path.join(PATH_TO_TMP_DIR, filename, "mav0",
                                             "cam0", "data"),
                                int(752 * resolution), int(480 * resolution))

                        cg = CommandGenerator()
                        command = cg.orb(filename=filename,
                                         path_to_orb=PATH_TO_ORB_SLAM,
                                         path_to_data=PATH_TO_TMP_DIR,
                                         path_to_config=PATH_TO_CONFIG,
                                         dataset=dataset,
                                         resolution=resolution)
                        print("Running ORB slam on {}!".format(filename))
                        t1 = time.perf_counter()
                        process = subprocess.Popen(command, shell=True)
Exemplo n.º 17
0
    def __init__(self, img_shape, latent_dim, num_classes, optimizer=None, dataset='', img_helper=None):
        """Store GAN configuration.

        ``optimizer`` defaults to a fresh ``Adam(lr=1e-4)`` and
        ``img_helper`` to a fresh ``ImageHelper()``.  The original used
        evaluated-at-definition defaults, so every instance silently shared
        ONE optimizer (with its internal state) and ONE helper; building
        them per call keeps instances independent while remaining
        backward-compatible for callers that pass explicit values.
        """
        self.img_helper = ImageHelper() if img_helper is None else img_helper
        self.img_shape = img_shape
        self.latent_dim = latent_dim

        self.num_classes = num_classes

        self.optimizer = Adam(lr=1e-4) if optimizer is None else optimizer

        self.dataset = dataset
Exemplo n.º 18
0
    parser.add_argument('-p', '--params', dest='params')
    args = parser.parse_args()
    with open(f'./{args.params}', 'r') as f:
        # NOTE(review): yaml.load without an explicit Loader is deprecated
        # and unsafe on untrusted files; prefer yaml.safe_load.
        params_loaded = yaml.load(f)

    current_time = datetime.now().strftime('%b.%d_%H.%M.%S')

    # Loan dataset
    if params_loaded['type'] == config.TYPE_LOAN:
        helper = LoanHelper(current_time=current_time, params=params_loaded,
                            name=params_loaded.get('name', 'loan'))
        helper.load_data(params_loaded)

    # Image Datasets
    elif params_loaded['type'] in [config.TYPE_CIFAR, config.TYPE_MNIST, config.TYPE_TINYIMAGENET]:
        helper = ImageHelper(current_time=current_time, params=params_loaded,
                             name=params_loaded.get('name'))
        helper.load_data()
    # NOTE(review): unlike the per-dataset branches kept commented out
    # below, 'name' has no default here, so a config missing 'name'
    # yields name=None.

    # # Loan dataset
    # if params_loaded['type'] == config.TYPE_LOAN:
    #     helper = LoanHelper(current_time=current_time, params=params_loaded,
    #                         name=params_loaded.get('name', 'loan'))
    #     helper.load_data(params_loaded)

    # # CIFAR dataset
    # elif params_loaded['type'] == config.TYPE_CIFAR:
    #     helper = ImageHelper(current_time=current_time, params=params_loaded,
    #                          name=params_loaded.get('name', 'cifar'))
    #     helper.load_data()

    # # MNIST dataset
Exemplo n.º 19
0
    # Initialize the detection model from the model file and label map
    # (model_file_path / labels_file_path are defined earlier in the file).
    ai_model = model(model_file_path, labels_file_path)

    # Initialize video reader
    video_file_path = '../Videos/01.mp4'
    video_reader = videoReader(video_file_path)

    # Detection and preview parameters
    score_threshold = 0.4
    # NOTE(review): presumably the inter-frame display delay in ms -- the
    # unit depends on display_image_with_detected_objects; confirm.
    delay_between_frames = 5

    # Perform object detection in the video sequence
    while (True):
        # Get frame from the video file
        frame = video_reader.read_next_frame()

        # If frame is None, then break the loop
        if (frame is None):
            break

        # Perform detection
        results = ai_model.detect_people(frame, score_threshold)

        # Get centers of the bounding boxes (rectangle centers)
        rectangle_centers = analyzer.get_rectangle_centers(results)

        # Draw centers before displaying results
        imgHelper.draw_rectangle_centers(frame, rectangle_centers)

        # Display detection results
        imgHelper.display_image_with_detected_objects(frame, results,
                                                      delay_between_frames)
Exemplo n.º 20
0
 def __init__(self, camera_overlay, resolution):
     """Store display settings and an image helper for this UI."""
     self.camera_overlay = camera_overlay
     self.resolution = resolution
     self.image_helper = ImageHelper(resolution)
     # Cached main-screen image; starts unset and is built lazily.
     self.main_screen_img = None
Exemplo n.º 21
0
# Add reference to Part_03 (assuming the code is executed from Part_04 folder)
import sys
sys.path.insert(1, '../Part_03/')

from inference import Inference as model
from image_helper import ImageHelper as imgHelper

from camera import Camera as camera

if __name__ == "__main__":
    # Paths to the TFLite model and its label map.
    model_file_path = '../Models/01_model.tflite'
    labels_file_path = '../Models/02_labels.txt'

    # Build the detector.
    detector = model(model_file_path, labels_file_path)

    # Initialize the camera.
    cam = camera()

    # Grab a single frame (preview disabled).
    frame = cam.capture_frame(False)

    # Run object detection on the captured frame.
    threshold = 0.5
    detections = detector.detect_objects(frame, threshold)

    # Show the annotated frame.
    imgHelper.display_image_with_detected_objects(frame, detections)
Exemplo n.º 22
0
# Train a GAN on the Fashion-MNIST dataset.
import numpy as np
from keras.datasets import fashion_mnist

from image_helper import ImageHelper
from gan import GAN

# Rescale pixels from [0, 255] to [-1, 1] and add a channel axis.
(X, _), (_, _) = fashion_mnist.load_data()
X_train = X / 127.5 - 1.
X_train = np.expand_dims(X_train, axis=3)

image_helper = ImageHelper()
# NOTE(review): 100 is presumably the latent (noise) dimension -- confirm
# against the GAN constructor.
generative_advarsial_network = GAN(X_train[0].shape, 100, image_helper)
generative_advarsial_network.train(30000, X_train, batch_size=32)
class PhotoboothController:
    """Glue between camera, UI, buttons, rotary encoder and printer.

    Green button: take a photo, or confirm printing when a preview is
    showing.  Red button: reject the preview and return to the main screen.
    The rotary encoder selects the number of copies to print.
    """

    def __init__(self, camera, output_path, app):
        self.output_path = output_path
        self.last_file_path = None
        self.app = app
        self.camera = camera
        self.camera_overlay = CameraOverlay(self.camera)
        self.printer = Printer()
        self.green_btn = Button(GREEN_BTN_PIN)
        self.red_btn = Button(RED_BTN_PIN)
        self.green_btn.when_pressed = self.pressed_capture_button
        self.red_btn.when_pressed = self.pressed_reject_print_button
        self.ui = UI(self.camera_overlay, camera.picam.resolution)
        self.image_helper = ImageHelper(camera.picam.resolution)
        # Rotary encoder selects the number of print copies (bounded at 5).
        self.rotary = Rotary(ROTARY_CLK_PIN, ROTARY_DT_PIN, upperBound=5)

        self.camera.show_preview(True)
        self.ui.show_main_screen()

        # waiting_for_confirm: a captured photo awaits print confirmation.
        # busy: a capture sequence is in progress.
        self.waiting_for_confirm = False
        self.busy = False

    def _reset_state(self):
        # Return to idle: no pending confirmation, rotary callback cleared.
        self.waiting_for_confirm = False
        self.busy = False
        self.rotary.clearCallback()
        self.rotary.resetCount()

    def pressed_capture_button(self):
        """Green button: print the pending photo, or start a new capture."""
        print("\ncapture")
        # (Removed an unused `output_path = None` local from the original.)
        if self.waiting_for_confirm:
            self.printer.printFile(self.last_file_path,
                                   copies=self.rotary.getValue())
            self.ui.clear_screen()
            self.ui.show_main_screen()
            self._reset_state()
        elif not self.busy:
            self.busy = True
            # show countdowns
            self.ui.clear_screen()
            self.ui.show_countdown()
            # flash lights
            # self._flash()
            # take photo
            self.last_file_path = self._capture()
            # confirm with user
            self._confirm_print(self.last_file_path)
            self.waiting_for_confirm = True

    def pressed_reject_print_button(self):
        """Red button: discard the pending photo and return to idle."""
        if self.waiting_for_confirm:
            print("\nreject")
            self.last_file_path = None
            self.ui.clear_screen()
            self.ui.show_main_screen()
            self._reset_state()

    def _confirm_print(self, path):
        """Overlay the captured photo and ask how many copies to print."""
        preview_image = self.image_helper.load_image(path)
        self.camera_overlay.add_overlay(preview_image)
        self.ui.show_confirm_screen(1)
        self.rotary.registerCallback(self.ui.update_confirm_screen)

    def _flash(self):
        pass
        # img = self.image_helper.create_flash_image()
        # self.camera_overlay.add_overlay(img)
        # time.sleep(0.2)
        # self.camera_overlay.remove_top_overlay()

    def _capture(self):
        """Take a photo and return the file it was saved to."""
        output_path = self._new_output_path()
        print("saving photo to " + output_path)
        self.camera.capture(output_path)
        return output_path

    def _new_output_path(self):
        # NOTE(review): the pattern has no year/second, so two photos taken
        # in the same minute overwrite each other, and ':' is invalid in
        # Windows file names -- confirm before changing the format.
        output_path = self.output_path + "image-%d-%mT%H:%M.png"
        return strftime(output_path, gmtime())
Exemplo n.º 24
0
        driver.switch_to.default_content()
        driver.switch_to.frame(driver.find_element_by_xpath("//iframe[contains(@src,'enquete')]"))
        driver.switch_to.frame(driver.find_element_by_xpath("//iframe[contains(@title,'challenge')]"))

        if check_exists_by_name(driver, "rc-imageselect-desc-no-canonical"):

            text_tag = driver.find_element_by_class_name("rc-imageselect-desc-no-canonical")
            sentence_tag = text_tag.find_element_by_tag_name("strong")
            sentence_text = sentence_tag.text
            sentence = sentence_text.split()

            image_tag = driver.find_element_by_xpath("//img[contains(@class,'rc-image')]")
            image_url = image_tag.get_attribute('src')

            image_aux = ImageHelper(image_url)
            image_aux.generate_images()

            api = ClarifaiApi(app_id=clarifai_client_settings['id'],
                              app_secret=clarifai_client_settings['secret'],
                              language=clarifai_client_language)

            tiles = driver.find_element_by_id("rc-imageselect-target")
            table = tiles.find_element_by_tag_name("table")
            tds = table.find_elements_by_tag_name("td")

            if len(tds) == 9:  # prevent error when recaptcha put more than 9 pics
                for idx, td in enumerate(tds):
                    response = api.tag_images(image_aux.get_image(idx+1))
                    results = response['results'][0]['result']['tag']['classes']
Exemplo n.º 25
0
    def train(self, sess):
        """Run the full GAN training loop inside *sess*.

        Pre-trains the discriminator for the first ``d_pretrain`` batches of
        epoch 0, then alternates discriminator updates with generator
        updates every ``d_schedule`` steps.  Writes TensorBoard summaries
        every ``log_interval`` steps and, at each epoch boundary, saves
        sample images plus a checkpoint.  (The 'd_accuarcy' spelling
        matches the attribute defined elsewhere in the class.)
        """
        # loss summaries
        d_summary_op = tf.summary.merge([
            tf.summary.histogram("d_real_prob", tf.sigmoid(self.real_logits)),
            tf.summary.histogram("d_fake_prob", tf.sigmoid(self.fake_logits)),
            tf.summary.scalar("d_loss_fake", self.d_loss_fake),
            tf.summary.scalar("d_loss_real", self.d_loss_real),
            tf.summary.scalar("d_loss", self.d_loss)
        ],
                                        name="discriminator_summary")
        g_summary_op = tf.summary.merge([
            tf.summary.histogram("g_prob", tf.sigmoid(self.fake_logits)),
            tf.summary.scalar("g_loss", self.g_loss),
            tf.summary.image("gen_images", self.fake_imgs)
        ],
                                        name="generator_summary")

        self.summary_dir = os.path.abspath(
            os.path.join(self.hparams.checkpoint_dir, "summary"))
        summary_writer = tf.summary.FileWriter(self.summary_dir, sess.graph)

        image_helper = ImageHelper()

        sess.run(tf.global_variables_initializer())

        # iter_images yields (epoch index, batch index, batch of images).
        for num_epoch, num_batch, batch_images in image_helper.iter_images(
                dirname=self.hparams.data_dir,
                batch_size=self.batch_size,
                epoches=self.epoches):
            if (num_epoch == 0) and (num_batch < self.hparams.d_pretrain):
                # pre-train discriminator
                _, current_step, d_loss, d_accuarcy = sess.run(
                    [
                        self.d_optim, self.global_step, self.d_loss,
                        self.d_accuarcy
                    ],
                    feed_dict={
                        self.rand_noises:
                        np.random.normal(
                            size=[self.batch_size, self.noise_dim]),
                        self.real_imgs:
                        batch_images
                    })
                # Log once, when the pre-training phase completes.
                if current_step == self.hparams.d_pretrain:
                    tf.logging.info("==== pre-train ==== current_step:{}, d_loss:{}, d_accuarcy:{}"\
                                    .format(current_step, d_loss, d_accuarcy))
            else:
                # optimize discriminator
                _, current_step, d_loss, d_accuarcy = sess.run(
                    [
                        self.d_optim, self.global_step, self.d_loss,
                        self.d_accuarcy
                    ],
                    feed_dict={
                        self.rand_noises:
                        np.random.normal(
                            size=[self.batch_size, self.noise_dim]),
                        self.real_imgs:
                        batch_images
                    })

                # optimize generator every d_schedule discriminator steps
                if current_step % self.hparams.d_schedule == 0:
                    _, g_loss = sess.run(
                        [self.g_optim, self.g_loss],
                        feed_dict={
                            self.rand_noises:
                            np.random.normal(
                                size=[self.batch_size, self.noise_dim])
                        })

                # summary
                if current_step % self.hparams.log_interval == 0:
                    d_summary_str, g_summary_str = sess.run(
                        [d_summary_op, g_summary_op],
                        feed_dict={
                            self.rand_noises:
                            np.random.normal(
                                size=[self.batch_size, self.noise_dim]),
                            self.real_imgs:
                            batch_images
                        })
                    summary_writer.add_summary(d_summary_str, current_step)
                    summary_writer.add_summary(g_summary_str, current_step)

                    tf.logging.info("step:{}, d_loss:{}, d_accuarcy:{}, g_loss:{}"\
                                    .format(current_step, d_loss, d_accuarcy, g_loss))

            # First batch of each later epoch: sample images + checkpoint.
            if (num_epoch > 0) and (num_batch == 0):
                # generate images per epoch
                tf.logging.info(
                    "epoch:{} === generate images and save checkpoint".format(
                        num_epoch))
                fake_imgs = sess.run(
                    self.fake_imgs,
                    feed_dict={
                        self.rand_noises:
                        np.random.normal(
                            size=[self.batch_size, self.noise_dim])
                    })
                image_helper.save_imgs(fake_imgs,
                                       img_name="{}/fake-{}".format(
                                           self.hparams.sample_dir, num_epoch))
                # save model per epoch
                self.saver.save(sess,
                                self.checkpoint_prefix,
                                global_step=num_epoch)
Exemplo n.º 26
0
import common
from image_helper import ImageHelper as imgHelper
from inference import Inference as model

if __name__ == "__main__":
    # Locations of the trained TFLite network and its label map.
    model_file_path = '../Models/01_model.tflite'
    labels_file_path = '../Models/02_labels.txt'

    # Wrap the model files in the project's inference helper.
    detector = model(model_file_path, labels_file_path)

    # Load the sample image to run detection on.
    input_image = imgHelper.load_image('../Images/Lena.png')

    # Keep only detections scoring at least the threshold.
    score_threshold = 0.5
    detections = detector.detect_objects(input_image, score_threshold)

    # Draw the detections over the image and show the result.
    imgHelper.display_image_with_detected_objects(input_image, detections)
Exemplo n.º 27
0
class UI:
    """Camera-preview UI composed of transparent RGBA overlay images.

    Each screen is rendered into a Pillow image sized to the camera
    resolution and pushed onto the ``camera_overlay`` stack; clearing a
    screen pops or removes those overlays.
    """

    def __init__(self, camera_overlay, resolution):
        # camera_overlay must expose add_overlay / remove_top_overlay /
        # remove_overlays (presumably a camera preview wrapper — TODO confirm).
        self.camera_overlay = camera_overlay
        # (width, height) used for every overlay image created here.
        self.resolution = resolution
        self.image_helper = ImageHelper(resolution)
        # Cached main-screen image, built lazily on first show_main_screen().
        self.main_screen_img = None

    def show_main_screen(self):
        """Display the main screen: a single green "Go!" button."""
        # BUGFIX: compare against None with `is`, not `==` (PEP 8; `==`
        # may invoke an arbitrary __eq__ on the image object).
        if self.main_screen_img is None:
            self.main_screen_img = Image.new('RGBA', self.resolution,
                                             (0, 0, 0, 0))
            self.image_helper.create_circle_image(self.main_screen_img,
                                                  UIConst.BUTTON_DIA,
                                                  "green",
                                                  ImagePosition.LOWERLEFT,
                                                  yPadding=UIConst.YPADDING)
            # Return value (previously bound to an unused local) discarded.
            self.image_helper.create_button_text_image(
                self.main_screen_img,
                "Go!",
                UIConst.FONT_SIZE,
                UIConst.BUTTON_DIA,
                UIConst.BUTTON_DIA,
                ImagePosition.LOWERLEFT,
                yPadding=UIConst.YPADDING)
        self.camera_overlay.add_overlay(self.main_screen_img)

    def show_confirm_screen(self, print_count):
        """Display the confirm screen: Retake/Print buttons plus a label.

        Args:
            print_count: number of copies shown in the header label.
        """
        img = Image.new('RGBA', self.resolution, (0, 0, 0, 0))
        print_label = "Copies: " + str(print_count)
        # Red "Retake" button background, lower-left.
        self.image_helper.create_circle_image(img,
                                              UIConst.BUTTON_DIA,
                                              "red",
                                              ImagePosition.LOWERLEFT,
                                              yPadding=UIConst.YPADDING)
        # Green "Print!" button background, lower-right.
        self.image_helper.create_circle_image(img,
                                              UIConst.BUTTON_DIA,
                                              "green",
                                              ImagePosition.LOWERRIGHT,
                                              yPadding=UIConst.YPADDING)
        self.image_helper.create_button_text_image(img,
                                                   "Retake",
                                                   UIConst.FONT_SIZE,
                                                   UIConst.BUTTON_DIA,
                                                   UIConst.BUTTON_DIA,
                                                   ImagePosition.LOWERLEFT,
                                                   yPadding=UIConst.YPADDING)
        self.image_helper.create_button_text_image(img,
                                                   "Print!",
                                                   UIConst.FONT_SIZE,
                                                   UIConst.BUTTON_DIA,
                                                   UIConst.BUTTON_DIA,
                                                   ImagePosition.LOWERRIGHT,
                                                   yPadding=UIConst.YPADDING)
        self.image_helper.create_text_image(img,
                                            print_label,
                                            UIConst.FONT_SIZE,
                                            ImagePosition.UPPERCENTER,
                                            yPadding=UIConst.YPADDING)
        self.camera_overlay.add_overlay(img)

    def update_confirm_screen(self, print_count):
        """Rebuild the confirm screen so its copy-count label updates."""
        print("update confirm screen with count: " + str(print_count))
        # pretty hacky since we're tearing down the whole ui just to update a label
        self.camera_overlay.remove_top_overlay()
        self.show_confirm_screen(print_count)

    def clear_screen(self):
        """Remove every overlay from the camera preview."""
        self.camera_overlay.remove_overlays()

    def show_countdown(self):
        """Flash a 3-2-1 countdown, roughly one second per digit."""
        for i in range(3, 0, -1):
            img = Image.new('RGBA', self.resolution, (0, 0, 0, 0))
            self.image_helper.create_text_image(img, str(i), UIConst.FONT_SIZE,
                                                ImagePosition.UPPERCENTER)
            self.camera_overlay.add_overlay(img)
            time.sleep(1)
            self.camera_overlay.remove_top_overlay()
            # Brief gap so consecutive digit overlays don't visually merge.
            time.sleep(0.05)
Exemplo n.º 28
0
    return total_l, acc


if __name__ == '__main__':
    print('Start training')
    time_start_load_everything = time.time()

    # Command-line interface: only the params/config file path.
    parser = argparse.ArgumentParser(description='PPDL')
    parser.add_argument('--params', dest='params', default='params_words.json')
    args = parser.parse_args()

    with open(f'./{args.params}', 'r') as f:
        # BUGFIX: yaml.load(f) with no explicit Loader is deprecated in
        # PyYAML >= 5.1 and a TypeError in PyYAML >= 6.0; safe_load also
        # avoids constructing arbitrary Python objects from the config.
        params_loaded = yaml.safe_load(f)
    current_time = datetime.datetime.now().strftime('%b.%d_%H.%M.%S')
    # Pick the helper matching the configured task type.
    if params_loaded['type'] == "image":
        helper = ImageHelper(current_time=current_time, params=params_loaded,
                             name=params_loaded.get('name', 'image'))
    else:
        helper = TextHelper(current_time=current_time, params=params_loaded,
                            name=params_loaded.get('name', 'text'))

    helper.load_data()
    helper.create_model()

    ### Create models
    if helper.params['is_poison']:
        # Participant 0 is always an adversary; the rest are sampled.
        # NOTE(review): sampling over the full participant range can re-draw
        # 0, yielding a duplicate adversary id — confirm this is intended.
        helper.params['adversary_list'] = [0]+ \
                                random.sample(range(helper.params['number_of_total_participants']),
                                                      helper.params['number_of_adversaries']-1)
        logger.info(f"Poisoned following participants: {len(helper.params['adversary_list'])}")
    else:
        helper.params['adversary_list'] = list()
Exemplo n.º 29
0
    # Initialize model
    ai_model = model(model_file_path, labels_file_path)

    # Initialize video reader
    video_file_path = '../Videos/01.mp4'
    video_reader = videoReader(video_file_path)

    # Detection and preview parameters
    score_threshold = 0.4
    detect_only_people = True
    delay_between_frames = 5

    # Perform object detection in the video sequence
    while (True):
        # Get frame from the video file
        frame = video_reader.read_next_frame()

        # If frame is None, then break the loop
        if (frame is None):
            break

        # Perform detection
        if (detect_only_people):
            results = ai_model.detect_people(frame, score_threshold)
        else:
            results = ai_model.detect_objects(frame, score_threshold)

        # Display results
        imgHelper.display_image_with_detected_objects(frame, results,
                                                      delay_between_frames)