Example #1
0
def search_image(item):
    """Search Google Images for *item* and return its processed color dict.

    Opens a throwaway browser, waits up to 20 s for the first result
    thumbnail, then polls up to 20 times for a real http(s) `src` (Google
    initially serves a base64 placeholder).  Returns `process_color(image)`
    on success, or None if no usable image was obtained.
    """
    image = None
    temp_driver = WebDriver()
    try:
        temp_driver.driver.get(search_str1 + str(item) + search_str2)
        img = WebDriverWait(temp_driver.driver, 20).until(ec.visibility_of_element_located((
            By.XPATH, '//*[@id="Sva75c"]/div/div/div[3]/div[2]/c-wiz/div[1]/div[1]/div/div[2]/a/img')))
        for i in range(20):
            try:
                src = str(img.get_attribute('src'))
                print(src)
                if src.find('http') == -1:
                    # Still the inline base64 placeholder — wait for the
                    # real URL to be swapped in.
                    time.sleep(1)
                    continue
                else:
                    image = load_image(src, (780 / SCALE, 600 / SCALE))
                    break
            except (TypeError, RequestException) as e:
                print(f'{i}. Error: {e}')
    finally:
        # Always release the browser process, even when the wait above
        # times out — the original leaked the driver on TimeoutException.
        temp_driver.driver.quit()
    if image is not None:
        image_dict = process_color(image)
        return image_dict
    return None
Example #2
0
def handle_shrink(args):
    """Shrink every image in ``args.files`` to the configured target size.

    Skips files that fail to load, images that are not sprites, and
    destinations the user declines to overwrite.
    """
    # Bail out early if shrinking is not applicable for these args.
    if not can_shrink(args):
        return

    target_size = _get_target_size(args)
    target_position = _get_target_position(target_size)

    for filename in args.files:
        # Load in; skip unreadable / missing files.
        image = load_image(filename)
        if image is None:
            continue

        # Only sprites are shrunk; other image types are left untouched.
        image_type = get_image_type(image)
        if image_type != "sprite":
            continue

        shrunk_filename = _get_shrunk_filename(filename, args)
        if not confirm_overwrite(shrunk_filename, args.confirm,
                                 args.preserve):
            continue

        # Resize and write the result to disk.
        resized_image = _resize_image(image, target_size, target_position)
        pygame.image.save(resized_image, shrunk_filename)
Example #3
0
 def draw(self):
     """Draw the cached image dict, or fetch and draw one from the entered URL."""
     url = self.entry_img.get()
     # A very short entry means "no URL typed": fall back to the cached dict.
     if len(url) < 6 and self.image_dict is not None:
         draw_image(self.image_dict)
         return
     if is_url(url):
         target_size = (780 / SCALE, 600 / SCALE)
         loaded = load_image(url, target_size)
         color_dict = process_color(loaded)
         time.sleep(5)
         draw_image(color_dict)
Example #4
0
def handle_extraction(args):
    """Extract faces/characters from ``args.files``; return files to resize.

    Unreadable images are skipped.  Images that are neither "face" nor
    "character" need no extraction and are queued for resizing as-is;
    extractable images are queued under their extracted filename (whether
    or not the extraction is actually written, matching prior behavior).
    """
    resizing_files = []

    for filename in args.files:
        # Skip files that do not exist / cannot be loaded.
        image = load_image(filename)
        if image is None:
            continue

        # Only faces and characters are extractable; everything else goes
        # straight to the resize queue.
        image_type = get_image_type(image)
        if image_type not in ("face", "character"):
            resizing_files.append(filename)
            continue

        # Get target filename
        extracted_filename = _get_extracted_filename(filename, args.game,
                                                     image_type)
        resizing_files.append(extracted_filename)

        # Don't clobber an existing file unless --confirm was passed or the
        # user confirms interactively (short-circuit preserves the original
        # "no prompt when args.confirm" behavior).
        if os.path.exists(extracted_filename) and not (
                args.confirm or
                confirm("Are you sure you want to overwrite " +
                        extracted_filename)):
            continue

        # Extract and save
        extracted_image = _extract_image(image, image_type)
        pygame.image.save(extracted_image, extracted_filename)

    return resizing_files
Example #5
0
def style_transfer(content=None,
                   content_dir=None,
                   content_size=512,
                   style=None,
                   style_dir=None,
                   style_size=512,
                   crop=None,
                   alpha=1.0,
                   output_dir='output',
                   save_ext='jpg',
                   gpu=0,
                   vgg_weights='models/vgg19_weights_normalized.h5',
                   decoder_weights='models/ckp-MST-paper',
                   patch_size=3,
                   n_clusters_s=3,
                   graphPara_smooth=0.1,
                   graphPara_max_cycles=3,
                   data_format='channels_first'):
    """Run multimodal style transfer over every (content, style) image pair.

    Exactly one of ``content``/``content_dir`` and one of
    ``style``/``style_dir`` must be given.  For each pair, the style VGG
    features are clustered into ``n_clusters_s`` groups, the content
    features are assigned to those groups by an alpha-expansion graph cut,
    each group is style-transferred separately, and the reassembled target
    feature is decoded and saved into ``output_dir``.
    """
    assert bool(content) != bool(
        content_dir), 'Either content or content_dir should be given'
    assert bool(style) != bool(
        style_dir), 'Either style or style_dir should be given'

    if not os.path.exists(output_dir):
        print('Creating output dir at', output_dir)
        os.makedirs(output_dir)

    # Assume that it is either an h5 file or a name of a TensorFlow checkpoint
    decoder_in_h5 = decoder_weights.endswith('.h5')

    if content:
        content_batch = [content]
    else:
        content_batch = extract_image_names_recursive(content_dir)

    if style:
        style_batch = [style]
    else:
        style_batch = extract_image_names_recursive(style_dir)

    print('Number of content images:', len(content_batch))
    print('Number of style images:', len(style_batch))
    total_output_batch = len(content_batch) * len(style_batch)
    print('Total number of output:', total_output_batch)

    image, content, style, target, encoder, decoder = _build_graph(
        vgg_weights,
        decoder_weights if decoder_in_h5 else None,
        alpha,
        patch_size,
        data_format=data_format)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    start_time = time.time()
    # BUG FIX: the original created one session with `config`, then shadowed
    # it with a second config-less session — leaking the first and never
    # applying `allow_growth`.  Use a single managed session instead.
    with tf.Session(config=config) as sess:
        if decoder_in_h5:
            sess.run(tf.global_variables_initializer())
        else:
            saver = tf.train.Saver()
            saver.restore(sess, decoder_weights)

        for content_path, style_path in product(content_batch, style_batch):
            content_name = get_filename(content_path)
            content_image = load_image(content_path, content_size, crop)

            style_name = get_filename(style_path)
            style_image = load_image(style_path, style_size, crop)

            style_image = prepare_image(style_image)
            content_image = prepare_image(content_image)
            style_feature = sess.run(
                encoder, feed_dict={image: style_image[np.newaxis, :]})
            content_feature = sess.run(
                encoder, feed_dict={image: content_image[np.newaxis, :]})

            # style_feature and content_feature information
            Bc, Cc, Hc, Wc = content_feature.shape
            Bs, Cs, Hs, Ws = style_feature.shape
            c_feat_rec_HWxC = np.zeros((Hc * Wc, Cc))

            # reshape content feature to (H*W, C)
            c_feat_HWxC = BxCxHxW_to_HWxC(content_feature)

            # cluster style feature into n_clusters_s groups
            s_feat_HWxC = BxCxHxW_to_HWxC(style_feature)
            s_cluster_centers, s_cluster_labels = cluster_feature(
                s_feat_HWxC, n_clusters_s)

            # construct D: per-pixel cost of assigning each style cluster
            graphPara_D = np.double(
                1 - cosine_similarity(c_feat_HWxC, s_cluster_centers))
            # construct V: pairwise label-smoothness cost
            X, Y = np.mgrid[:n_clusters_s, :n_clusters_s]
            graphPara_V = graphPara_smooth * np.float_(np.abs(X - Y))
            # graph cut (alpha-expansion) assigns a style cluster per pixel
            graphPara_sol = fastmin.aexpansion_grid(graphPara_D, graphPara_V,
                                                    graphPara_max_cycles)
            # style-transfer each cluster's content subset independently
            for label_idx in range(n_clusters_s):
                print("#%d cluster:" % label_idx)
                # select content feature assigned to this cluster
                c_subset_index = np.argwhere(
                    graphPara_sol == label_idx).flatten()
                c_subset_sample = c_feat_HWxC[c_subset_index, :]
                c_subset_sample = HWxC_to_BxCxHWxW0(c_subset_sample)
                print("c_subset_sample:", c_subset_sample.shape)
                # select corresponding style feature
                s_subset_index = np.argwhere(
                    s_cluster_labels == label_idx).flatten()
                s_subset_sample = s_feat_HWxC[s_subset_index, :]
                s_subset_sample = HWxC_to_BxCxHWxW0(s_subset_sample)
                print("s_subset_sample:", s_subset_sample.shape)
                # feature transfer
                t_subset_sample = sess.run(target,
                                           feed_dict={
                                               content: c_subset_sample,
                                               style: s_subset_sample
                                           })

                # scatter the transferred subset back into the full map
                t_subset_sample = BxCxHxW_to_HWxC(t_subset_sample)
                c_feat_rec_HWxC[c_subset_index, :] = t_subset_sample
            # reshape to target feature
            target_feature = HWxC_to_BxCxHxW(c_feat_rec_HWxC, Hc, Wc, Cc)

            # decode the recombined feature into the output image
            output = sess.run(decoder,
                              feed_dict={
                                  content: content_feature,
                                  target: target_feature
                              })

            filename = '%s_stylized_%s.%s' % (content_name, style_name,
                                              save_ext)
            filename = os.path.join(output_dir, filename)
            save_image(filename, output[0], data_format=data_format)
            print('Output image saved at', filename)
        end_time = time.time()
        # Guard the average against an empty batch (the original raised
        # ZeroDivisionError when a directory contained no images).
        if total_output_batch:
            print('Total outputs=' + str(total_output_batch) + ', total time=' +
                  str(end_time - start_time) + ', average time=' +
                  str((end_time - start_time) / total_output_batch))