Example #1
class ImagesStorage(AbstractStorage):
    # Tile surfaces are loaded once, at class-definition time.
    defaults = {
        'wall': load_image('box.png', prefix=r"..\..\data"),
        'empty': load_image('grass.png', prefix=r"..\..\data"),
        'player': load_image('mario.png', prefix=r"..\..\data"),
    }

    @classmethod
    def create_singleton(cls):
        return ImagesStorage()
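None of these examples define `load_image` itself. For reference only, here is a minimal pygame-flavored sketch consistent with the call above; the `prefix`, `colorkey`, and `alpha` parameters are assumptions inferred from the call sites in this and later examples:

import os
import pygame

def load_image(name, prefix="data", colorkey=None, alpha=False):
    # Hypothetical helper: load `prefix`/`name` as a pygame Surface.
    # Requires pygame.display to be initialized before convert()/convert_alpha().
    surface = pygame.image.load(os.path.join(prefix, name))
    surface = surface.convert_alpha() if alpha else surface.convert()
    if colorkey is not None:
        surface.set_colorkey(colorkey)  # treat this color as transparent
    return surface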
Example #2
def test_vgg(fn, vgg19=False):
    print('test_vgg', fn)
    # TF1-style graph mode: tf.placeholder / tf.Session (see the compatibility note below).
    x = tf.placeholder(dtype='float32', shape=[None, 224, 224, 3])
    model = modelPath19 if vgg19 else modelPath16
    vgg16 = vgg.Vgg(x, 1000, vgg19, model)
    prob = vgg16.build(False)
    img = utils.load_image(fn, 224, 224)
    # Subtract the per-channel ImageNet mean used by the original VGG models.
    mean = np.array([103.939, 116.779, 123.68])
    img = img - mean
    batch1 = img.reshape([1, 224, 224, 3])

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        vgg16.loadModel(sess)
        out = sess.run(prob, feed_dict={x: batch1})[0][0]
        classes_num = len(out)
        print(classes_num)
        import data.vgg_classes as classes
        # argsort is ascending, so the top-5 classes sit at the end of `pred`.
        pred = np.argsort(out)
        for i in range(5):
            index = classes_num - i - 1
            print(pred[index], out[pred[index]],
                  classes.class_names[pred[index]])
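This example relies on TensorFlow 1.x graph mode (`tf.placeholder`, `tf.Session`, `tf.ConfigProto`). If run under TensorFlow 2, the usual workaround is the v1 compatibility layer:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # restores placeholders, sessions, and graph mode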
Example #3
    def __init__(self,
                 surface,
                 image_name,
                 coord_to_blit,
                 scale=None,
                 resize_to=None):
        self.surface = surface
        self.image = utils.load_image(image_name)
        self.coord_to_blit = coord_to_blit
        self.scale = scale
        self.resize_to = resize_to

        self.image_rect = self.image.get_rect()
        self.image_rect.topleft = coord_to_blit

        if self.scale is not None:
            # smoothscale expects integer dimensions, so cast the scaled size.
            self.image = pygame.transform.smoothscale(
                self.image,
                (
                    int(self.image.get_size()[0] * self.scale),
                    int(self.image.get_size()[1] * self.scale),
                ),
            )
        elif self.resize_to is not None:
            self.image = pygame.transform.smoothscale(self.image,
                                                      self.resize_to)

        self.image = self.image.convert()  # convert() returns a new Surface; keep the result
Example #4
def minibatch(file_list, batchsize, w, h):
    length = len(file_list)
    i = 0
    epoch = 0
    random.shuffle(file_list)
    while True:
        if i + batchsize >= length:
            random.shuffle(file_list)
            epoch += 1
            i = 0
        images = []
        labels = []
        for j in range(i, i + batchsize):  # Python 3 range; the original used Python 2 xrange
            # Each line has the form "path,classid\n": split on the first comma.
            content = file_list[j]
            npos = content.index(',')
            path = content[:npos]
            classid = content[npos + 1:].strip()

            image = utils.load_image(path, w, h)
            # One-hot encode the class id.
            index = classes_id.index(classid)
            label = np.zeros(len(classes_id), dtype=int)  # np.int was removed in NumPy 1.24
            label[index] = 1

            images.append(image)
            labels.append(label)
        i += batchsize
        images = np.array(images, dtype=np.float32)
        labels = np.array(labels, dtype=np.float32)
        yield epoch, images, labels
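A minimal consumption sketch for the generator above; the batch size, image size, and the shapes in the comment are illustrative assumptions:

gen = minibatch(file_list, batchsize=32, w=224, h=224)
epoch, images, labels = next(gen)  # images: (32, 224, 224, 3), labels: one-hot (32, len(classes_id))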
Example #5
def try_to_ocr(path, gauss_w, gauss_h, treshold):
    try:
        image_context = ImageContext(load_image(path), gauss_h, gauss_w, treshold)
        digitCnts = get_digit_contours(get_contours_of_image(image_context.image))
        digits = [str(digit) for digit in
                  get_digits_from_digit_contours(get_digit_map(), digitCnts, image_context.image)]
        return digits
    except Exception:
        # Any failure (unreadable image, no contours found) is reported as "no digits".
        return []
Example #6
def test_load_image():
    images_list = get_images_list("images")
    for image_name in images_list:
        image = load_image(image_name, path_to_folder="images", bgr2rgb=True)
        assert len(image.shape) == 3, f"error in {image_name}"
        assert image.shape[2] == 3, f"error in {image_name}"
        assert image.max() <= 255, f"error in {image_name}"
        assert image.min() >= 0, f"error in {image_name}"
Example #7
def read_prediction_gt(dname, fnames):
    images = []
    for fname in fnames:
        fname = os.path.join(dname, fname)
        image = load_image(fname)
        image = normalize(image)
        images.append(image)
    return torch.stack(images, dim=0)
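Note that `torch.stack` requires all tensors to share the same shape, so this helper implicitly assumes `load_image` and `normalize` return equally sized tensors for every file.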
Example #8
def main(args):
    logging.info("Started loading image.")
    image = load_image(args.input_file)
    logging.info("Finished loading image.")
    segmentation_function = SEGMENTATION_TECHNIQUES[args.segmentation]
    logging.info("Started image segmentation.")
    segmented_image = segmentation_function(image)
    logging.info("Finished image segmentation.")
    num_segments = np.max(segmented_image)
    print(f"Number of detected jigsaw pieces: {num_segments}.")
    for i in range(1, num_segments + 1):
        logging.info(f"Start processing segment {i}.")
        subimage = segmented_image == i

        cutout = get_cutout(subimage)
        total_space = np.sum(cutout)
        best_space = 0
        space_target = 0.8
        # Relax the coverage target by 5% per attempt until the best rectangle's
        # convex hull covers enough of the piece.
        while best_space <= space_target * total_space:
            boundaries, corners = get_boundaries_and_corners(
                cutout, num_candidates=args.corner_candidates
            )
            space_target -= 0.05
            possible_rectangles = find_rectangle_candidates(
                cutout, corners, candidate_limit=10
            )
            best_rectangle = find_best_rectangle(possible_rectangles)
            zeros = np.zeros_like(cutout)
            for p in best_rectangle:
                zeros[p[0], p[1]] = 1
            best_space = np.sum(convex_hull_image(zeros) * cutout)
            logging.info(
                f"Current space is {best_space} which is {best_space / total_space} of total space. "
                f"Required space is {space_target}."
            )
        boundary_points = np.argwhere(boundaries == 1)
        new_boundary_points, classes = classify_points(best_rectangle, boundary_points)
        description = get_object_description(
            best_rectangle,
            new_boundary_points,
            classes,
            center_of_mass(cutout),
            None,
            center_thr=12,
            max_thr=75,
        )
        print(f"Figure_{i}'s code is {description}.")
        center = center_of_mass(subimage)
        logging.info(f"Saving image to output.png")
        draw_type(image, center, description)
        image.save("output.png")
Example #9
def get_output_image(self, content_path, style_path, options: dict):
    start_time = time.time()
    style_content_model = VGG19Model(CONTENT_LAYERS, STYLE_LAYERS)

    content_image, style_image = [
        load_image(path) for path in (content_path, style_path)
    ]

    # Start either from white noise or from a copy of the content image.
    image = tf.Variable(get_white_noise_image(tf.shape(content_image)[1:])) \
        if options['white_noise_input'] else tf.Variable(content_image)

    style_targets = style_content_model(style_image)['style_outputs']
    content_targets = style_content_model(content_image)['content_outputs']

    opt = tf.keras.optimizers.Adam(learning_rate=options['learning_rate'],
                                   beta_1=0.99,
                                   epsilon=1e-1)

    style_content_model.compile(opt)

    for epoch in range(options['epochs']):
        for step in range(options['steps']):
            style_content_model.fit(
                image,
                content_targets=content_targets,
                style_targets=style_targets,
                content_layer_weights=[1],
                style_layer_weights=options['style_layer_weights'],
                content_weight=options['content_weight'],
                style_weight=options['style_weight'],
                variation_weight=options['variation_weight'])
            self.update_state(state='PROGRESS',
                              meta={
                                  'current': options['steps'] * epoch + step,
                                  'total': options['steps'] * options['epochs'],
                                  'elapsed_time': "{:.1f}s".format(time.time() - start_time)
                              })

    output_path = Path('./static/output') / (str(self.request.id) + '.png')
    save_image(image, Path(output_path))

    return {
        'output_path': str(output_path),
        'total_time': "{:.1f}s".format(time.time() - start_time)
    }
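The Adam settings here (beta_1=0.99, epsilon=1e-1) match the values used in the TensorFlow neural style transfer tutorial; the unusually large epsilon tends to damp per-parameter step sizes, which reduces noise in the optimized image.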
Example #10
def main():
  args = arguments()
  model = SmoothImageApprox(latent_size=args.latent_size).to(device)
  image = load_image(args.image, [args.size, args.size])
  save_image(f"outputs/fieldgan_ref.png", image)
  image = image.permute(2, 0, 1)
  # use same image for content and loss.
  D = StyleTransfer(image[None, ...], image[None, ...]).to(device)
  image = image.to(device)
  init_image = model.init_zero(image.permute(1,2,0))
  save_image(f"outputs/fieldgan_init.png", init_image)
  opt = optim.Adam(model.displacement.parameters(), lr=1e-3, weight_decay=0)
  #opt = optim.Adam(model.parameters(), lr=1e-3, weight_decay=0)
  train(model, image, D, opt, args)
  # TODO render the image after, fixing a latent noise and iterating through time
  test(model, args)
Example #11
    def __init__(self, game_class):
        super().__init__(game_class)

        # Initializes buttons, like MenuState
        self.buttons = {
            "pause_button": (
                [
                    ImageButton(self.screen,
                                "pause.png", (750, 0),
                                resize_to=(50, 50)),
                    ImageButton(self.screen,
                                "play.png", (750, 0),
                                resize_to=(50, 50)),
                ],
                NotImplemented,
            )
        }

        # Initializes other important components.
        # NOTE: Some will be converted to lists later, to allow multiple entities of each kind.
        self.pause_menu = PauseMenu(game_class)
        self.background = utils.load_image("bg.png").convert()
        self.player = Player()
        self.enemy = enemy.BulletEnemy(self.player)
        self.shop = ShopEntity()

        self.test_hp = HealthBar(300, 300, 30, 5, 50, 100, 0, (128, 128, 128),
                                 (100, 100, 100), 1)

        # Currently, each tile is an 800x600-pixel image. May be changed to a square (possibly 25x25) soon.
        self.map = [
            [1, 1, 1, 1, 1, 1, 1],
            [1, 1, 1, 1, 1, 1, 1],
            [1, 1, 1, 1, 1, 1, 1],
            [1, 1, 1, 1, 1, 1, 1],
            [1, 1, 1, 1, 1, 1, 1],
            [1, 1, 1, 1, 1, 1, 1],
            [1, 1, 1, 1, 1, 1, 1],
        ]

        # Sets some commonly used game-wide data
        game_data.player = self.player
        game_data.player_list["main_player"] = self.player
        game_data.playing_substate = self
        game_data.current_substate = self
Example #12
def main(_argv):
    flags.mark_flag_as_required("model")
    flags.mark_flag_as_required("image")
    flags.mark_flag_as_required("labels")

    labels = read_label_pbtxt(FLAGS.labels)

    start_time = time.time()
    tf.keras.backend.clear_session()
    model = tf.saved_model.load(FLAGS.model)
    end_time = time.time()
    logging.info("model loaded")
    logging.info(f"Elapsed time: {str(end_time - start_time)}sec")

    image_np = load_image(FLAGS.image)
    image_tensor = np.expand_dims(image_np, axis=0)
    image_tensor = preprocess_input(image_tensor)
    height, width, _ = image_np.shape
    start_time = time.time()
    detections = model(image_tensor)
    end_time = time.time()

    boxes = detections["detection_boxes"][0].numpy()
    classes = detections["detection_classes"][0].numpy().astype(np.int32)
    scores = detections["detection_scores"][0].numpy()

    output_image = draw_boxes(
        image_np.copy(),
        boxes,
        classes,
        scores,
        labels,
        height,
        width,
        min_threshold=FLAGS.threshold,
    )

    output_image = cv2.cvtColor(output_image, cv2.COLOR_BGR2RGB)
    cv2.imwrite(FLAGS.output, output_image)
    cv2.imshow("Object Detection", output_image)
    cv2.waitKey(0)
    logging.info(f"Elapsed time: {str(end_time - start_time)}sec")
Example #13
def stylize(args):
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Loads image
    content = load_image(args.content).to(device)

    with torch.no_grad():
        # Load Transformer net
        style_model = TransformerNet()
        state_dict = torch.load(args.model)

        # Load Model Weights
        style_model.load_state_dict(state_dict)
        style_model.eval().to(device)

        # Forward through Image Transformation Network
        out = style_model(content).cpu()

    # Save result image
    save_image(out, args.out)
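A hedged invocation sketch for `stylize` above; the attribute names `content`, `model`, and `out` come from the function body, while the argparse wiring itself is an assumption:

import argparse

parser = argparse.ArgumentParser(description='Stylize an image with a trained TransformerNet')
parser.add_argument('--content', required=True, help='path to the content image')
parser.add_argument('--model', required=True, help='path to trained model weights (.pth)')
parser.add_argument('--out', required=True, help='output path for the stylized image')
stylize(parser.parse_args())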
Example #14
def test(args):
    """Stylize a content image"""

    device = torch.device("cuda" if args.cuda else "cpu")

    transformer = TransformerNet().to(device)
    if args.model:
        transformer.load_state_dict(torch.load(args.model))

    content_transform = transforms.Compose([
        transforms.Resize(args.content_size),
        transforms.CenterCrop(args.content_size),
        transforms.ToTensor(),
        transforms.Lambda(lambda x: x.mul(255))
    ])
    content_image = utils.load_image(args.content_image)
    content_image = content_transform(content_image)
    content_image = content_image.unsqueeze(0).to(device)

    output = transformer(content_image).cpu().detach()
    utils.save_image(args.output_image, output[0] * 255)
Example #15
def image_edit(args):
    image_path = args.data_path
    # args.save_dir = "D:/Pycharm PRoject/open_cv/data/save/images"

    ch = convex_Hull()
    sd = ShapeDetector()
    unified = []

    image = load_image(image_path)
    blurred = filter_image(image)
    mask = hsv_mask(blurred)
    contours = find_contours(mask)
    status = ch.contours_status(contours)
    contour_list = ch.contour_array(status)
    hull = ch.convex_hull(contour_list, contours)

    unified.append(hull)
    unified = np.array(unified, dtype=np.int32)

    shape = sd.detect(hull)
    sd.print_shape_parameters(shape)
    # print("Shape", shape)

    # Use a thicker outline for larger contours.
    area = contour_avg_area(contours)
    width = 5 if area >= 15 else 1
    draw_contours(image, unified, width=width)
    if args.save_dir is not None:
        if not os.path.exists(args.save_dir):
            print("The directory %s does not exist; creating it." % args.save_dir)
            os.makedirs(args.save_dir)
        save_image(image,
                   args.save_dir + "/" + args.save_image,
                   unified,
                   width=width)
Example #16
    def load(self):
        debugger("Assets: load: Loading all game assets")

        self.FLAPPY_UP_IMAGE = load_image('flappy-up.png', colorkey=None, alpha=True)
        self.FLAPPY_MIDDLE_IMAGE = load_image('flappy-middle.png', colorkey=None, alpha=True)
        self.FLAPPY_DOWN_IMAGE = load_image('flappy-down.png', colorkey=None, alpha=True)

        self.PIPE_TOP_IMAGE = load_image('yellow-cable-top.png')
        self.PIPE_BODY_IMAGE = load_image('yellow-cable-body.png')

        self.BACKGROUND_DAY_COLOR = (49, 64, 70)
        self.BACKGROUND_DAY_IMAGE = load_image('background.png')

        self.GROUND_IMAGE = load_image('ground.png')

        # Sounds downloaded under the CC0 license from http://www.freesound.org/;
        # some of these have been edited to fit the game.
        self.GAME_OVER_SOUND = load_sound('game-over.wav')
        self.START_GAME_SOUND = load_sound('game-start.wav')
        self.FLAPPY_FLAP_SOUND = load_sound('jump.wav')
        self.GAINED_POINT_SOUND = load_sound('point.wav')
Example #17
def app():
    st.title('Albumentations Demo App')
    st.markdown(f'Albumentations Version: {A.__version__}')

    # Select Image
    st.subheader('Image')
    image_files = glob.glob('./image/*')
    image_files = ['Upload'] + [os.path.basename(path) for path in image_files]
    image_name = st.selectbox('Select Pic', image_files)

    # Load Image
    img = load_image(image_name)

    if img is not None:
        st.subheader('Image Info')
        st.markdown(f'(Height, Width) : ({img.shape[0]}, {img.shape[1]})')

        # Setting Sidebar
        with open('config.yml') as file:
            cfg = yaml.safe_load(file)
        transforms = get_transform(cfg)

        # Display Image  -----------------------------------------------------
        display_images(img, transforms)
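Streamlit re-executes `app()` from the top on every widget interaction, so the image is reloaded and config.yml re-read each time a control changes; if that becomes slow, caching the loaders (e.g. with `st.cache_data`) is the usual remedy.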
Example #18
def train(args):
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    transform = transforms.Compose([
        transforms.Resize(args.image_size),
        transforms.CenterCrop(args.image_size),
        transforms.ToTensor(),
        transforms.Lambda(lambda x: x.mul(255))
    ])
    train_dataset = datasets.ImageFolder(args.dataset, transform)
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size)

    # Loads transformer, vgg
    transformer = TransformerNet().to(device)
    mse_loss = torch.nn.MSELoss()
    tv_loss = TVLoss(args.tv_weight).to(device)
    optimizer = optim.Adam(transformer.parameters(), args.learning_rate)
    if LOSS_NETWORK == 'vgg16':
        vgg = VGG16(STYLE_LAYERS, CONTENT_LAYER,
                    requires_grad=False).to(device)
    else:
        vgg = VGG19(STYLE_LAYERS, CONTENT_LAYER,
                    requires_grad=False).to(device)

    # Loads style image
    style = load_image(args.style, args.batch_size).to(device)

    # Computes style
    features_style, _ = vgg(normalize_batch(style, args.norm_range))
    gram_style = [gram_matrix(y)
                  for _, y in features_style.items()]

    # Save paths
    style_name = args.style.split('/')[-1].split('.')[0]
    checkpoint_file = os.path.join(args.checkpoint_dir,
                                   '{}.pth'.format(style_name))

    # Training Algorithm
    for epoch in range(args.epochs):
        transformer.train()
        c_loss = 0.
        s_loss = 0.

        for batch_id, (x, _) in tqdm(enumerate(train_loader), unit=' batches'):
            x = x.to(device)
            n_batch = len(x)

            optimizer.zero_grad()

            pred = transformer(x)
            y = normalize_batch(pred, args.norm_range)
            x = normalize_batch(x, args.norm_range)

            features_y, content_y = vgg(y)
            features_x, content_x = vgg(x)

            # Content Loss
            content_loss = args.content_weight * \
                mse_loss(content_y[CONTENT_LAYER], content_x[CONTENT_LAYER])

            # Style Loss
            style_loss = 0.0
            features_y = [feature for _,
                          feature in features_y.items()]
            for ft_y, gm_s in zip(features_y, gram_style):
                gm_y = gram_matrix(ft_y)
                style_loss += mse_loss(gm_y, gm_s[:n_batch, :, :])
            style_loss *= args.style_weight

            # TV (total variation) Loss
            tv = tv_loss(pred)

            # Backpropagation
            total_loss = content_loss + style_loss + tv
            total_loss.backward()
            optimizer.step()

            c_loss += content_loss.item()
            s_loss += style_loss.item()

            if (batch_id + 1) % args.log_interval == 0:
                tqdm.write('[{}] ({})\t'
                           'content: {:.6f}\t'
                           'style: {:.6f}\t'
                           'total: {:.6f}'.format(
                               epoch + 1, batch_id + 1,
                               c_loss / (batch_id + 1),
                               s_loss / (batch_id + 1),
                               (c_loss + s_loss) / (batch_id + 1)))

            # Saves a Checkpoint
            if (batch_id + 1) % args.save_interval == 0:
                transformer.eval().cpu()

                tqdm.write('Checkpoint {}'.format(checkpoint_file))
                torch.save(transformer.state_dict(), checkpoint_file)

                transformer.to(device).train()
    # Save final model
    transformer.eval().cpu()
    torch.save(transformer.state_dict(), checkpoint_file)
    print("Finished! Trained model saved at", checkpoint_file)
Example #19
def fast_train(args):
    """Fast training"""

    device = torch.device("cuda" if args.cuda else "cpu")

    transformer = TransformerNet().to(device)
    if args.model:
        transformer.load_state_dict(torch.load(args.model))
    vgg = Vgg16(requires_grad=False).to(device)
    global mse_loss
    mse_loss = torch.nn.MSELoss()

    content_weight = args.content_weight
    style_weight = args.style_weight
    lr = args.lr

    content_transform = transforms.Compose([
        transforms.Resize(args.content_size),
        transforms.CenterCrop(args.content_size),
        transforms.ToTensor(),
        transforms.Lambda(lambda x: x.mul(255))
    ])
    content_dataset = datasets.ImageFolder(args.content_dataset,
                                           content_transform)
    content_loader = DataLoader(
        content_dataset,
        batch_size=args.iter_batch_size,
        sampler=InfiniteSamplerWrapper(content_dataset),
        num_workers=args.n_workers)
    content_loader = iter(content_loader)
    style_transform = transforms.Compose([
        transforms.Resize((args.style_size, args.style_size)),
        transforms.ToTensor(),
        transforms.Lambda(lambda x: x.mul(255))
    ])

    style_image = utils.load_image(args.style_image)
    style_image = style_transform(style_image)
    style_image = style_image.unsqueeze(0).to(device)
    features_style = vgg(
        utils.normalize_batch(style_image.repeat(args.iter_batch_size, 1, 1,
                                                 1)))
    gram_style = [utils.gram_matrix(y) for y in features_style]

    if args.only_in:
        # Optimize only parameters whose names contain "in" (the instance-norm layers).
        optimizer = Adam(
            [param for (name, param) in transformer.named_parameters() if "in" in name],
            lr=lr)
    else:
        optimizer = Adam(transformer.parameters(), lr=lr)

    for i in trange(args.update_step):
        contents = next(content_loader)[0].to(device)  # .next() is Python 2 only; use next(...)
        features_contents = vgg(utils.normalize_batch(contents))

        transformed = transformer(contents)
        features_transformed = vgg(utils.standardize_batch(transformed))
        loss, c_loss, s_loss = loss_fn(features_transformed, features_contents,
                                       gram_style, content_weight,
                                       style_weight)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    # save model
    transformer.eval().cpu()
    style_name = os.path.basename(args.style_image).split(".")[0]
    save_model_filename = style_name + ".pth"
    save_model_path = os.path.join(args.save_model_dir, save_model_filename)
    torch.save(transformer.state_dict(), save_model_path)