Example #1
    def sr_text(self, btn):
        image = utils.open_image('tmp/crop_little.png')
        utils.save_image('model/dcscn_text/crop_little.png', image)
        os.system('cd model/dcscn_text && python sr.py --file=crop_little.png --batch_image_size=18 --layers=18 --filters=196 --training_images=100000 --scale=8')
        image = utils.open_image('model/dcscn_text/output/dcscn_L18_F196to48_Sc8_NIN_A64_PS_R1F32/crop_little_result.png')
        utils.save_image('tmp/crop.png', image)
        self.sr_bool = True
        self.popup_crop.dismiss()
        self.image_onPress()
Example #2
def generate_layer_list(res):
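    """Push a sample image through each torch module in res, recording a Layer
    descriptor (type, in/out shapes, kernel, stride, predecessor index) per module."""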
    img = open_image('1.jpg')
    img = torch.unsqueeze(img, 0)
    layer_list = []
    index = 0
    for layer in res:
        temp_class = None  # reset so an unhandled layer type is not appended twice
        if isinstance(layer, torch.nn.modules.conv.Conv2d):
            temp_class = Layer('conv', img.shape,
                               layer(img).shape, index, layer.kernel_size[0],
                               layer.stride[0], index - 1)
            img = layer(img)
        elif isinstance(layer, torch.nn.modules.activation.ReLU):
            temp_class = Layer('ReLu', img.shape,
                               layer(img).shape, index, 0, 0, index - 1)
            img = layer(img)

        elif isinstance(layer, torch.nn.modules.pooling.MaxPool2d):
            temp_class = Layer('pool', img.shape,
                               layer(img).shape, index, layer.kernel_size,
                               layer.stride, index - 1)
            img = layer(img)
        elif isinstance(layer, torch.nn.modules.dropout.Dropout):
            temp_class = Layer('Dropout', img.shape,
                               layer(img).shape, index, 0, 0, index - 1)
            img = layer(img)
        elif isinstance(layer, torch.nn.modules.linear.Linear):
            img = img.view(-1)
            temp_class = Layer('FC', img.shape,
                               layer(img).shape, index, 0, 0, index - 1)

            img = layer(img)
        if temp_class is not None:
            layer_list.append(temp_class)
        index += 1
    return layer_list
Example #3
def main():
    # Open Image File as a coloured image
    img = utils.open_image(settings.settingsImagePath)
    # Retrieve filtered image
    walls = utils.apply_vision_filter(img)
    ground = utils.apply_ground_filter(img)
    # Initialize Bot with startup settings
    bot = robot.Robot(x=settings.settingsStartX,
                      y=settings.settingsStartY,
                      direction=settings.settingsFaceDirection,
                      wall_map=walls,
                      ground_map=ground,
                      no_of_squares_per_side=settings.settingsGridSideSquares,
                      cell_side_length=len(img) //
                      settings.settingsGridSideSquares)

    # Initialize user bot scripts
    src = settings.settingsSrcClass(bot)

    # Run setup
    src.setup()
    loop_img = numpy.copy(img)
    while True:
        # Refresh Screen
        utils.refresh_screen(loop_img, bot)
        # Loop
        loop_img = numpy.copy(img)
        ret = src.loop(loop_img)
        if ret == SimulationRunStatus.STOP_SIMULATION:
            # If stop simulation signal, Exit
            break
Example #4
    def __init__(self, path):
        self.com1 = enlace(serialName)
        self.com1.enable()
        self.img_array = open_image(path)
        self.l_bytes_img = separate_packages(self.img_array)
        self.l_packages = []
        self.str_log = ''
        self.start_execution_time = 0
Example #5
def add_reminders(img_path):
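    """OCR a menu image and add a reminder for each of the first four (date, food) items parsed from it."""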
    img = open_image(img_path)
    ocr_text = ocr(img)

    menu_items = text_to_menu_items(ocr_text)[:4]
    for date, food in menu_items:
        add_reminder(date, food)
    return menu_items
Example #6
    def automatic_ai(self):
        image = utils.open_image('foo.png')
        image = utils.resize_image(image, 1280, 720)
        utils.save_image('model/CAR/input.jpg', image)
        os.system('cd model/CAR && python main.py input.jpg yolo')
        image = utils.open_image('model/CAR/result.jpg')
        image = utils.resize_image(image, 854, 480)
        utils.save_image('foo.png', image)
        self.ids.image_source.reload()

        positions = utils.get_car_positions()
        print('Cars positions: ' + str(positions))

        cars = glob.glob('model/CAR/cars/*.png')
        plate.process_cars(cars)

        utils.clean_cars_folder()
Example #7
    def __getitem__(self, index):
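        """Load one (image, target) pair from train_dir, apply optional joint preprocessing, and return both as torch tensors."""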
        _file = self.files[index]
        file_path = self.train_dir / _file
        img = open_image(file_path)

        target = self.labels[index]

        if self.preproc is not None:
            img, target = self.preproc(img, target)

        return torch.from_numpy(img), torch.from_numpy(target)
Example #8
def upload_file():
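    """Dispatch an uploaded file by MIME type: images are OCR'd into a .docx report,
    audio is transcribed to text, and .docx/.odt documents are converted and returned."""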
    f = request.files['file']
    mimetype = f.content_type
    print(mimetype)
    if mimetype == "image/jpeg0" or mimetype == "image/png":
        image = open_image(f)
        extracted_text = read_image(image)
        document = write_docx(extracted_text)
        print("okay")
        return send_file(document,
                         as_attachment=True,
                         attachment_filename="report.docx")
    elif mimetype == "audio/mpeg" or mimetype == "audio/wav":
        f.filename = "sound.wav"
        f.save(f.filename)
        stream = os.popen('python3 test_ffmpeg.py ' + f.filename)
        output = stream.read()
        print(output)
        text = ''
        with open('res.json') as json_file:
            data = json.load(json_file)
            text = data['text']
            print(data['text'])
        extracted_text = text
        processed_text = convert_text(extracted_text)
        print(extracted_text + ';' + processed_text)
        response = make_response(extracted_text + ';' + processed_text, 200)
        response.mimetype = "text/plain"
        return response

    elif mimetype == "application/vnd.openxmlformats-officedocument.wordprocessingml.document":
        document = open_docx(f)
        extracted_text = read_docx(document)
        processed_text = convert_text(extracted_text)
        new_document = write_docx(processed_text)
        stream = save_docx(new_document)
        print("okay")
        response = make_response(
            send_file(stream,
                      as_attachment=True,
                      attachment_filename="report1to3.docx"))
        response.headers['word'] = 'yes'
        return response
    elif mimetype == "application/vnd.oasis.opendocument.text":
        extracted_text = open_odt(f)
        processed_text = convert_text(extracted_text)
        stream = write_odt(processed_text)
        print("okay")
        response = make_response(
            send_file(stream,
                      as_attachment=True,
                      attachment_filename="report1to3.odt"))
        response.headers['word'] = 'no'
        return response
Example #9
    def post(self):
        self.logger.info("Request to resize IMG for request file")

        args = self._get_args()

        for item in self.request.files.values():
            for file_info in item:
                name = "/tmp/rs-%s-%s.png" % (time.time(), file_info["filename"])

                resize(open_image(file_info["body"]), name, args)
                self.response_file(name)
                return
Example #10
    def _get_QRcode(self):
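        """Request the JD login QR code, save it locally, and open it for scanning."""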
        url = 'https://qr.m.jd.com/show'
        payload = {
            'appid': 133,
            'size': 147,
            't': str(int(time.time() * 1000)),
        }
        headers = {
            'User-Agent': self.user_agent,
            'Referer': 'https://passport.jd.com/new/login.aspx',
        }
        resp = self.sess.get(url=url, headers=headers, params=payload)

        if not response_status(resp):
            logger.info('Failed to fetch the QR code')
            return False

        QRCode_file = 'QRcode.png'
        save_image(resp, QRCode_file)
        logger.info('QR code saved; open the JD app and scan it to log in')
        open_image(QRCode_file)
        return True
Example #11
    def _get_auth_code(self, uuid):
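        """Download the JD login captcha, display it, and return the code the user types."""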
        image_file = os.path.join(os.getcwd(), 'jd_authcode.jpg')

        url = 'https://authcode.jd.com/verify/image'
        payload = {
            'a': 1,
            'acid': uuid,
            'uid': uuid,
            'yys': str(int(time.time() * 1000)),
        }
        headers = {
            'User-Agent': self.user_agent,
            'Referer': 'https://passport.jd.com/uc/login',
        }
        resp = self.sess.get(url, params=payload, headers=headers)

        if not response_status(resp):
            logger.error('Failed to fetch the captcha image')
            return ''

        save_image(resp, image_file)
        open_image(image_file)
        return input('Captcha: ')
Example #12
def classify_image(img_path):
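    """Classify a screenshot by its OCR text: 'mosaic', 'food' (many weekday mentions), or 'trustnet' (default)."""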
    img = open_image(img_path)
    ocr_text = ocr(img)

    if 'mosaicscience' in ocr_text:
        return 'mosaic'

    days_mentioned = [
        token for line in ocr_text.split('\n') for token in line.split(' ')
        if token in DAYS
    ]
    if len(days_mentioned) > 5:
        return 'food'

    return 'trustnet'
Example #13
def create_collage(image_filename):
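    """Dither the image with every (method, palette) combination and paste the results,
    with row/column labels, into one labeled collage saved as collage.png."""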
    from multiprocessing import Pool, cpu_count
    from PIL import ImageDraw

    image = utils.open_image(image_filename)
    width, height = image.size

    n_palettes = len(palette.available_palettes)
    n_methods = len(available_methods)

    canvas_size = (width * (n_palettes + 1), height * (n_methods + 1))
    canvas = Image.new('RGB', canvas_size)
    canvas.paste(image, (0, 0))
    drawer = ImageDraw.Draw(canvas)
    font = _get_font(image.size)
    font_color = (255, 255, 255, 255)

    image_matrix = utils.pil2numpy(image)

    work_objects = []

    for p_i, p in enumerate(palette.available_palettes):
        text_width, text_height = font.getsize(p)
        text_pos = ((p_i + 1) * width + (width - text_width) / 2,
                    (height - text_height) / 2)
        drawer.text(text_pos, p, font=font, fill=font_color)
        for m_i, m in enumerate(available_methods):
            if p_i == 0:
                text_width, text_height = font.getsize(m)
                text_pos = ((width - text_width) / 2,
                            (m_i + 1) * height + (height - text_height) / 2)
                drawer.text(text_pos, m, font=font, fill=font_color)
            image_offset = ((p_i + 1) * width, (m_i + 1) * height)
            work_objects.append((image_offset, image_matrix, m, p))
    del drawer

    pool = Pool(cpu_count())

    results = pool.map(_do_work, work_objects)

    for r in results:
        image_offset, dither_image = r
        canvas.paste(dither_image, image_offset)

    canvas.save('collage.png')
    canvas.show()
Example #14
def draw_image_model(img_model, out_dir):
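    """Draw the poem text, image id, and signature onto the model's background image and save it under out_dir."""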

    # image = Image.open('background.png')
    im = utils.open_image(img_model['image_path'])

    # initialise the drawing context with
    # the image object as background

    # if the out dir doesn't exist, create it
    try:
        os.makedirs(out_dir)
    except OSError:
        pass

    image_draw = ImageDraw.Draw(im)

    # p_color = 'rgb(0, 0, 0)'
    # p_color = get_rgb_color(img_model['colors'],3)
    # p_shadowcolor = get_rgb_color(img_model['colors'],1)

    # p_color = get_darkest(img_model['colors'])
    # p_shadowcolor = get_brightest(img_model['colors'])

    p_color = make_rgb_string(img_model['colors'][2])
    p_shadowcolor = make_rgb_string(img_model['colors'][3])
    

    # the poem
    draw_paragraph(img_model['text'].replace('|','\n'), image_draw, img_model['font_path'],
        10, 5, title_size=35, justify=False, line_width=25, color = p_color, 
        shadowcolor = p_shadowcolor)
    
    # the id
    draw_img_id(img_model['id'], image_draw, img_model['font_path'], im.height, text_size=80, color = p_color, shadowcolor = p_shadowcolor)

    # sig
    draw_sig('@claytantor', image_draw, img_model['font_path'], im.height, im.width, text_size=25, color = p_color, shadowcolor = p_shadowcolor)
    
    out_img_path = '{}/{}'.format(out_dir, '{}.png'.format(img_model['id']))
    print(out_img_path)
    im.save(out_img_path, "PNG", optimize=True, quality=20)
Example #15
def find_mosaic_url(img=None, img_path=None):
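    """Guess the source article URL for a screenshot: OCR its title, take Google's
    first result, and sanity-check the match by title similarity."""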
    if img_path:
        img = open_image(img_path)

    img_title = read_title(img)
    print(img_title)
    mosaic_url = get_google_first_result(img_title)
    print(mosaic_url)

    if mosaic_url:
        mosaic_title = get_mosaic_title(mosaic_url)
        print(mosaic_title)

        similarity_score = calculate_title_similarity(mosaic_title, img_title)
        print(similarity_score)

        if similarity_score < 0.2:
            return "https://s3-us-west-2.amazonaws.com/hs-production-blog/blog/wp-content/uploads/2017/01/27124612/headspace_blog_wrong.gif"

        return mosaic_url

    return "https://cdn.dribbble.com/users/1554526/screenshots/3399669/no_results_found.png"
Example #16
    def __getitem__(self, idx):
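        """Map a flat dataset index to (video, frame), load an edge-clamped window of
        temp_psz frames, and return (noisy frames, clean center frame, noise std)."""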
        arr_accumFrameNum = np.add.accumulate(self.arr_videoFrameNum)
        # print("arr_accumFrameNum=", arr_accumFrameNum)
        idxVideo = np.searchsorted(arr_accumFrameNum, idx)
        # print("idxVideo=", idxVideo)
        if idxVideo == 0:
            idxFrame = idx
        else:
            idxFrame = idx - arr_accumFrameNum[idxVideo - 1] - 1  # starts from 0
        videoFolder = self.list_videoFolders[idxVideo]
        # print("videoFolder=", videoFolder)
        list_img = sorted(glob.glob(videoFolder + "/*jpg"))
        inframes = list()
        for i in range(self.temp_psz):
            if idxFrame - self.ctrlfr_idx + i < 0:
                relidx = 0
            elif idxFrame - self.ctrlfr_idx + i > len(list_img) - 1:
                relidx = len(list_img) - 1
            else:
                relidx = idxFrame - self.ctrlfr_idx + i
            img, expanded_h, expanded_w = open_image(list_img[relidx], gray_mode=False)
            inframes.append(img)
        # list to numpy array
        inframes = np.stack(inframes, axis=0)
        # numpy to torch tensor
        inframes = torch.from_numpy(inframes[:, np.newaxis, :, :, :]).to(
            self.device)
        # target
        target = inframes[self.ctrlfr_idx]
        # Add noise
        noise = torch.empty_like(inframes).normal_(mean=0,
                                                   std=self.noise_sigma).to(
                                                       self.device)
        inframes = inframes + noise
        noisestd = torch.FloatTensor([self.noise_sigma]).to(self.device)

        return inframes, target, noisestd
Example #17
def generator(X_train, y_train, batch_size, training=False):
    '''
        Data generator used for training the model.

        - Reads images from their paths
        - Preprocesses images
        - Augments training data only.
    '''

    number_samples = len(y_train)
    while 1:
        # sklearn-style shuffle returns shuffled copies, so the result must be reassigned
        X_train, y_train = shuffle(X_train, y_train)
        for offset in range(0, number_samples, batch_size):
            X = X_train[offset:offset + batch_size]
            y = y_train[offset:offset + batch_size]
            X = [utils.open_image(x) for x in X]
            X = [utils.pre_process(x) for x in X]

            if training:
                for i, (image, label) in enumerate(zip(X, y)):
                    X[i] = utils.augment(image)

            X = np.array([keras_image.img_to_array(x) for x in X])
            yield shuffle(X, y)
Example #18
    def load(self, path, filename):
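        """Load the picked file: in video mode dump up to ~500 frames to tmp/frames/,
        in image mode show the picture in the viewer widget."""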
        if(self.ids.toggle_video.state == 'down'):
            # video path
            self.video_file = os.path.join(path, filename[0])
            # clean tmp frames folder
            files = glob.glob('tmp/frames/*')
            for f in files:
                os.remove(f)
            # loading the frames of the video
            cap = cv2.VideoCapture(self.video_file)
            currentFrame = 0
            while(True):
                # Capture frame-by-frame
                ret, frame = cap.read()
                # stop when the video ends or after 500 frames
                if not ret or currentFrame > 500:
                    break
                # Saves image of the current frame in png file
                name = 'tmp/frames/' + str(currentFrame) + '.png'
                print('Creating...' + name)
                utils.save_image(name, frame)

                # To stop duplicate images
                currentFrame += 1
            cap.release()

        if(self.ids.toggle_image.state == 'down'):
            # image path
            self.image_file = os.path.join(path, filename[0])
            image = utils.open_image(self.image_file)

            # refresh image viewer widget
            utils.save_image('foo.png', image)
            self.ids.image_source.reload()
        self.dismiss_popup()
Example #19
    def handleKeyBinding(self):
        """Should have all keybinding handle here"""
        while True:

            if self.interface.resize_event:
                self.interface.handle_resize_event()
                self.interface.erase_flash_message()
                self.interface.display_timeline()

            ch = self.interface.screen.getch()

            # DOWN
            if ch == self.conf.keys["down"] or ch == curses.KEY_DOWN:
                self.interface.move_down()
            # UP
            elif ch == self.conf.keys["up"] or ch == curses.KEY_UP:
                self.interface.move_up()
            # LEFT
            elif ch == self.conf.keys["left"] or ch == curses.KEY_LEFT:
                self.interface.navigate_buffer(-1)
            # RIGHT
            elif ch == self.conf.keys["right"] or ch == curses.KEY_RIGHT:
                self.interface.navigate_buffer(+1)
            # TWEET
            elif ch == self.conf.keys["tweet"]:
                self.api.tweet()
            # RETWEET
            elif ch == self.conf.keys["retweet"]:
                self.api.retweet()
            # RETWEET AND EDIT
            elif ch == self.conf.keys["retweet_and_edit"]:
                self.api.retweet_and_edit()
            # DELETE TWEET
            elif ch == self.conf.keys["delete"]:
                self.api.destroy()
            # MENTIONS
            elif ch == self.conf.keys["mentions"]:
                self.interface.change_buffer("mentions")
            # HOME TIMELINE
            elif ch == self.conf.keys["home"]:
                self.interface.change_buffer("home")
            # CLEAR
            elif ch == self.conf.keys["clear"]:
                self.interface.clear_statuses()
            # UPDATE
            elif ch == self.conf.keys["update"]:
                self.api.update_timeline(self.interface.buffer)
            # FOLLOW SELECTED
            elif ch == self.conf.keys["follow_selected"]:
                self.api.follow_selected()
            # UNFOLLOW SELECTED
            elif ch == self.conf.keys["unfollow_selected"]:
                self.api.unfollow_selected()
            # FOLLOW
            elif ch == self.conf.keys["follow"]:
                self.api.follow()
            # UNFOLLOW
            elif ch == self.conf.keys["unfollow"]:
                self.api.unfollow()
            # OPENURL
            elif ch == self.conf.keys["openurl"]:
                self.interface.openurl()
            # BACK ON TOP
            elif ch == self.conf.keys["back_on_top"]:
                self.interface.back_on_top()
            # BACK ON BOTTOM
            elif ch == self.conf.keys["back_on_bottom"]:
                self.interface.back_on_bottom()
            # REPLY
            elif ch == self.conf.keys["reply"]:
                self.api.reply()
            # GET DIRECT MESSAGE
            elif ch == self.conf.keys["getDM"]:
                self.interface.change_buffer("direct")
            # SEND DIRECT MESSAGE
            elif ch == self.conf.keys["sendDM"]:
                self.api.direct_message()
            # SEARCH
            elif ch == self.conf.keys["search"]:
                self.api.search()
            # SEARCH USER
            elif ch == self.conf.keys["search_user"]:
                self.api.find_public_timeline()
            # SEARCH MYSELF
            elif ch == self.conf.keys["search_myself"]:
                self.api.my_public_timeline()
            elif ch == self.conf.keys["search_current_user"]:
                self.api.find_current_public_timeline()
            # Redraw screen
            elif ch == self.conf.keys["redraw"]:
                self.interface.display_redraw_screen()
            # Help
            elif ch == ord("?"):
                Help()
            # Create favorite
            elif ch == self.conf.keys["fav"]:
                self.api.set_favorite()
            # Get favorite
            elif ch == self.conf.keys["get_fav"]:
                self.api.get_favorites()
            # Destroy favorite
            elif ch == self.conf.keys["delete_fav"]:
                self.api.destroy_favorite()
            # Thread
            elif ch == self.conf.keys["thread"]:
                self.api.get_thread()
            # Open image
            elif ch == self.conf.keys["open_image"]:
                open_image(self.interface.current_status().user)
            # User info
            elif ch == ord("i"):
                self.interface.current_user_info()
            elif ch == self.conf.keys["waterline"]:
                self.interface.update_last_read_home()
                self.interface.back_on_top()
            # QUIT
            elif ch == self.conf.keys["quit"]:
                self.interface.stoped = True
                break
            else:
                continue

            self.interface.erase_flash_message()
            self.interface.display_timeline()
Example #20
def main(args):
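    """Style-transfer training driver: mode 0 restores the saved transform net and
    stylizes one image; any other mode runs warm-up epochs, then training, checkpointing
    the best model and dumping a sample image every 10 epochs."""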
    epochs = args.epochs

    file_list = get_files('./data')
    num_samples = len(file_list)

    tf.reset_default_graph()
    sess = tf.InteractiveSession()

    optimal = sys.float_info.max

    style = np.zeros(BATCH_SIZE, dtype=np.float32)
    style[0] = open_image(args.style_image)

    with tf.Graph().as_default(), tf.Session() as sess:

        X_content = tf.placeholder(tf.float32,
                                   shape=BATCH_SIZE,
                                   name="X_content")
        X_style = tf.placeholder(tf.float32, shape=BATCH_SIZE, name="X_style")
        generated_image = net(X_content / 255.0)

        warmup_loss = tf.reduce_mean(tf.square(generated_image - X_content))
        warmup_step = tf.train.AdamOptimizer(0.001).minimize(warmup_loss)

        loss = loss_func(sess, X_style, X_content, generated_image)
        train_step = tf.train.AdamOptimizer(0.00005).minimize(loss)
        sess.run(tf.global_variables_initializer())

        if args.mode == 0:
            saver = tf.train.Saver()
            saver.restore(sess, './model/transform_net.ckpt')
            eval_batch = np.zeros(BATCH_SIZE, dtype=np.float32)
            eval_batch[0] = open_image(args.content_image)
            start_time = time.time()
            output = sess.run(generated_image,
                              feed_dict={
                                  X_style: style,
                                  X_content: eval_batch
                              })
            end_time = time.time()
            print("Time: %s" % str(end_time - start_time))
            save_image(args.output_dir + "/generated_image.jpg", output)
            return

        print("Warming up...")
        for epoch in range(20):
            for i in range(num_samples):
                batch = np.zeros(BATCH_SIZE, dtype=np.float32)
                batch[0] = open_image(file_list[i])

                feed_dict = {X_style: style, X_content: batch}

                sess.run(warmup_step, feed_dict=feed_dict)

            eval_batch = np.zeros(BATCH_SIZE, dtype=np.float32)
            eval_batch[0] = open_image(args.content_image)
            eval_loss = sess.run(warmup_loss,
                                 feed_dict={
                                     X_style: style,
                                     X_content: eval_batch
                                 })
            print("Epoch: %d, Loss: %f" % (epoch, eval_loss))

        print("Start Training...")
        for epoch in range(args.epochs):
            for i in range(num_samples):
                batch = np.zeros(BATCH_SIZE, dtype=np.float32)
                batch[0] = open_image(file_list[i])

                feed_dict = {X_style: style, X_content: batch}

                sess.run(train_step, feed_dict=feed_dict)

            eval_batch = np.zeros(BATCH_SIZE, dtype=np.float32)
            eval_batch[0] = open_image(args.content_image)
            start_time = time.time()
            eval_loss = sess.run(loss,
                                 feed_dict={
                                     X_style: style,
                                     X_content: eval_batch
                                 })
            end_time = time.time()
            change_time = end_time - start_time
            print("Epoch: %d, Loss: %f, Time: %s" %
                  (epoch, eval_loss, str(change_time)))

            if optimal > eval_loss:
                saver = tf.train.Saver()
                res = saver.save(sess, './model/transform_net.ckpt')
                optimal = eval_loss

            if epoch % 10 == 0:
                output = sess.run(generated_image,
                                  feed_dict={
                                      X_style: style,
                                      X_content: eval_batch
                                  })
                save_image(args.output_dir + "/" + str(epoch) + ".jpg", output)
Example #21
from DNN_model import Get_ResNet_50, Get_VGG_16
from utils import vertex_extract, open_image, Layer, VM, evaluate_performance
import cv2
import cnn_finetune
import torch
import numpy as np
img = open_image('1.jpg')
img = torch.unsqueeze(img, 0)
VGG_16 = Get_VGG_16()
res = vertex_extract(VGG_16)
#Divide Index
divid_index = 19

index = 0
layer_list = []
for layer in res:
    if isinstance(layer, torch.nn.modules.conv.Conv2d):
        temp_class = Layer('conv', img.shape,
                           layer(img).shape, index, layer.kernel_size[0],
                           layer.stride[0], index - 1)
        img = layer(img)
    elif isinstance(layer, torch.nn.modules.activation.ReLU):
        temp_class = Layer('ReLu', img.shape,
                           layer(img).shape, index, 0, 0, index - 1)
        img = layer(img)

    elif isinstance(layer, torch.nn.modules.pooling.MaxPool2d):
        temp_class = Layer('pool', img.shape,
                           layer(img).shape, index, layer.kernel_size,
                           layer.stride, index - 1)
        img = layer(img)
Example #22
            opr = utils.clamp(opr + random.gauss(0.0, 1./6.))
            opg = utils.clamp(opg + random.gauss(0.0, 1./6.))
            opb = utils.clamp(opb + random.gauss(0.0, 1./6.))
            new_pixel = numpy.array(utils.closest_palette_color([opr, opg, opb],
                palette_name), dtype=float)  # numpy.float was removed in NumPy 1.24
            new_matrix[x][y] = new_pixel
    return new_matrix

_available_methods = OrderedDict([
        ('random' , randomized),
        ('block_random' , block_randomized),
])

if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('image_filename', help='Path to an image file to dither')
    parser.add_argument('-b', '--bit-depth', type=int, default=1, help='Number of bits in dithered image')
    palette_help_str = 'Name of palette to use. Can be one of: ' + ', '.join(palette.available_palettes)
    parser.add_argument('-p', '--palette', type=str, default=default_palette, help=palette_help_str)
    args = parser.parse_args()

    image = utils.open_image(args.image_filename)
    image_matrix = utils.pil2numpy(image)

    dither_matrix = randomized(image_matrix, args.palette)
    dither_image = utils.numpy2pil(dither_matrix)

    dither_image.show()
Example #23
def train(FLAGS):
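    """Classic neural style transfer: extract content/style targets from VGG16, then
    optimize a white-noise canvas against the weighted content, style, and noise losses."""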

    shape = (FLAGS.image_height, FLAGS.image_width)

    content_img = utils.open_image(FLAGS.content_input, shape=shape)
    style_img = utils.open_image(FLAGS.style_input, shape=shape)

    with tf.Session() as sess:

        print('Calculating target content...')

        vgg = vgg16(img=content_img, train=False)

        sess.run(tf.global_variables_initializer())

        vgg.load_weights(weight_file=FLAGS.model, sess=sess)

        content_target = sess.run(vgg.content_response())

        print('Calculating target style...')

        vgg = vgg16(img=style_img, train=False)

        sess.run(tf.global_variables_initializer())

        vgg.load_weights(weight_file=FLAGS.model, sess=sess)

        style_target = sess.run(vgg.style_response())

        print('Begin training on ' +
              str((FLAGS.image_width, FLAGS.image_height)) + ' image...')

        vgg = vgg16(img=utils.white_noise(shape=shape + (3, )), train=True)

        loss = FLAGS.alpha * vgg.content_loss(
            content_target) + FLAGS.beta * vgg.style_loss(
                style_target) + FLAGS.gamma * vgg.noise_loss()

        train_op = vgg.training(loss, learning_rate=FLAGS.learning_rate)

        saver = tf.train.Saver()

        if FLAGS.checkpoint is not None:
            saver.restore(sess, FLAGS.checkpoint)
            print("Progress restored.")
        else:
            sess.run(tf.global_variables_initializer())
            vgg.load_weights(weight_file=FLAGS.model, sess=sess)

        for step in range(FLAGS.max_steps):
            start_time = time.time()

            sess.run(train_op)

            duration = time.time() - start_time

            # Print status to stdout.
            if step % 20 == 0 or (step + 1) == FLAGS.max_steps:
                loss_value, content_loss, style_loss, noise_loss = sess.run([
                    loss,
                    vgg.content_loss(content_target),
                    vgg.style_loss(style_target),
                    vgg.noise_loss()
                ])
                print(
                    'Step %d: %.3f sec \n total_loss = %.3e \n content_loss = %.3e \n style_loss = %.3e \n noise_loss = %.3e'
                    % (step, duration, loss_value, FLAGS.alpha * content_loss,
                       FLAGS.beta * style_loss, FLAGS.gamma * noise_loss))

            if (step + 1) % 100 == 0 or (step + 1) == FLAGS.max_steps:
                vgg.adjust()
                checkpoint_file = os.path.join(FLAGS.log_dir, 'model.ckpt')
                saver.save(sess, checkpoint_file)

                output_file = str(
                    time.strftime('%Y-%m-%d %H:%M:%S',
                                  time.localtime())) + '.png'
                output_file = os.path.join(FLAGS.output_dir, output_file)
                image = vgg.img.eval()
                image = np.asarray(image[0, :, :, :], dtype='uint8')
                utils.save_image(output_file, image)
Example #24
    def __init__(self,
                 content_path,
                 style_path,
                 output_path,
                 loss_ratio=1e-3,
                 verbose=True,
                 save_every_n_iters=10,
                 initial_canvas='content'):
        """ intialize all things for style transfer

        the overall process is as follows:
            1. image preprocessing
            2. define loss functions for content representations and style representations
            3. define evaluation function for training loss and gradients

        Args:
            content_image_path (str): The path for the content image
            style_image_path (str): The path for the style image
            loss_ratio (float): alpha divided by beta (beta is defined as 1)
            verbose (bool): True for print states, False otherwise
            save_every_n_steps: save images every n steps
            initial_canvas (str): the initial canvas for generated images.
                                  Choice in {'random', 'content', 'style'}
        """
        self.output_path = output_path
        self.save_every_n_iters = save_every_n_iters
        self.initial_canvas = initial_canvas
        self.verbose = verbose
        self.step = 0

        content_layer = DEFAULT_CONTENT_LAYER
        style_layers = DEFAULT_STYLE_LAYERS

        # load the style and content images
        content_img = utils.open_image(content_path)
        self.img_shape = (content_img.shape[0], content_img.shape[1], 3)
        self.content_img = utils.preproc(content_img)
        self.style_img = utils.preproc(
            utils.open_image(style_path, self.img_shape))
        content_img = K.variable(self.content_img)
        style_img = K.variable(self.style_img)

        # define a placeholder for a generated image
        if K.image_data_format() == 'channels_first':
            generated_img = K.placeholder(
                (1, 3, self.img_shape[0], self.img_shape[1]))
        else:
            generated_img = K.placeholder(
                (1, self.img_shape[0], self.img_shape[1], 3))

        # create a keras tensor for the input
        input_tensor = K.concatenate([content_img, style_img, generated_img],
                                     axis=0)

        # load VGG16 with the weights pretrained on imagenet.
        # ! the original paper uses vgg19 and replaces its all max_pooling to avg_pooling.
        vgg16 = VGG16(input_tensor=input_tensor,
                      include_top=False,
                      input_shape=self.img_shape)

        # outputs of each layer
        outputs_dict = {layer.name: layer.output for layer in vgg16.layers}

        # loss for the content image
        content_feat = outputs_dict[content_layer][0]
        generat_feat = outputs_dict[content_layer][2]
        feat_size, ch_size = utils.get_feat_channel_size(generat_feat)

        # ! the original paper suggests 'divided by 2'
        # the following denominator is from:
        # from https://github.com/cysmith/neural-style-tf/blob/master/neural_style.py
        content_loss = K.sum(K.square(content_feat - generat_feat)) \
                        / (2. *  feat_size * ch_size)

        # loss for the style image.
        style_loss = K.variable(0.)
        style_loss_weight = 1. / len(style_layers)

        for style_layer in style_layers:
            style_feat = outputs_dict[style_layer][1]
            generat_feat = outputs_dict[style_layer][2]
            feat_size, ch_size = utils.get_feat_channel_size(generat_feat)

            style_loss += style_loss_weight * \
                          K.sum(K.square(utils.gram_matrix(style_feat) - \
                                         utils.gram_matrix(generat_feat))) / \
                          (4. * feat_size ** 2 * ch_size ** 2)

        # composite loss
        beta = 1
        alpha = loss_ratio * beta
        content_loss = alpha * content_loss
        style_loss = beta * style_loss
        total_loss = content_loss + style_loss

        # gradients
        grads = K.gradients(total_loss, generated_img)

        # evaluation function
        self.eval_fn = K.function(
            [generated_img], [total_loss, content_loss, style_loss] + grads)
Example #25
# This is the hopefully faster approach to training a generator
# network.

import config256 as config
import generation_network
import keras.backend as K
from keras.layers import Input
from keras.models import load_model
import keras.engine.topology as topology
import numpy as np
import utils

# Load source image.
content_target_image = utils.open_image(
    config.CONTENT_PHOTO_PATH,
    vggify=False,
)
content_target_image /= 255.0

generation_model = generation_network.build(input_shape=config.DIMS)
generation_model.load_weights(config.MODEL_PATH)

stylized_image = generation_model.predict(
    np.expand_dims(content_target_image, axis=0))
utils.save_image(0, stylized_image, image_shape=config.DIMS, unvggify=False)
Example #26
    def keystroke(self, ch):
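        """Dispatch one keypress to the matching interface/API action (urwid UI)."""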
        if not self.interface.help:
            # Quit
            if ch == self.conf.keys['quit']:
                self.interface.stoped = True
                raise urwid.ExitMainLoop()
            # Right
            elif ch == self.conf.keys['right'] or ch == 'right':
                self.interface.navigate_buffer(+1)
            # Left
            elif ch == self.conf.keys['left'] or ch == 'left':
                self.interface.navigate_buffer(-1)
            elif ch == self.conf.keys['up']:
                self.interface.go_up()
            elif ch == self.conf.keys['down']:
                self.interface.go_down()
            # Update
            elif ch == self.conf.keys['update']:
                self.api.update_timeline(self.interface.buffer)
            # Tweet
            elif ch == self.conf.keys['tweet']:
                self.interface.edit_status('tweet', prompt='Tweet ')
            # Reply
            elif ch == self.conf.keys['reply']:
                self.interface.reply()
            # Retweet
            elif ch == self.conf.keys['retweet']:
                self.api.retweet()
            # Retweet and Edit
            elif ch == self.conf.keys['retweet_and_edit']:
                self.api.retweet_and_edit()
            # Delete
            elif ch == self.conf.keys['delete']:
                self.api.destroy()
            # Mention timeline
            elif ch == self.conf.keys['mentions']:
                self.interface.change_buffer('mentions')
            # Home Timeline
            elif ch == self.conf.keys['home']:
                self.interface.change_buffer('home')
            # Direct Message Timeline
            elif ch == self.conf.keys['getDM']:
                self.interface.change_buffer('direct')
            # Clear statuses
            elif ch == self.conf.keys['clear']:
                self.interface.clear_statuses()
            # Follow Selected
            elif ch == self.conf.keys['follow_selected']:
                self.api.follow_selected()
            # Unfollow Selected
            elif ch == self.conf.keys['unfollow_selected']:
                self.api.unfollow_selected()
            # Follow
            elif ch == self.conf.keys['follow']:
                self.interface.edit_status('follow', prompt='Follow')
            # Unfollow
            elif ch == self.conf.keys['unfollow']:
                self.interface.edit_status('unfollow', prompt='Unfollow ')
            # Open URL
            elif ch == self.conf.keys['openurl']:
                self.interface.openurl()
            # Search
            elif ch == self.conf.keys['search']:
                self.interface.edit_status('search', prompt='Search ')
            # Search User
            elif ch == self.conf.keys['search_user']:
                self.interface.edit_status('public', prompt='Nick ')
            # Search Myself
            elif ch == self.conf.keys['search_myself']:
                self.api.my_public_timeline()
            # Search Current User
            elif ch == self.conf.keys['search_current_user']:
                self.api.find_current_public_timeline()
            # Send Direct Message
            # FIXME
            # elif ch == self.conf.keys['sendDM']:
            #     self.api.direct_message()
            # Create favorite
            elif ch == self.conf.keys['fav']:
                self.api.set_favorite()
            # Get favorite
            elif ch == self.conf.keys['get_fav']:
                self.api.get_favorites()
            # Destroy favorite
            elif ch == self.conf.keys['delete_fav']:
                self.api.destroy_favorite()
            # Thread
            elif ch == self.conf.keys['thread']:
                self.api.get_thread()
            # Open image
            elif ch == self.conf.keys['open_image']:
                open_image(self.interface.current_status().user)
            # User info
            elif ch == 'i':
                self.interface.current_user_info()
            # Waterline
            elif ch == self.conf.keys['waterline']:
                self.interface.update_last_read_home()
            # Back on Top
            elif ch == self.conf.keys['back_on_top']:
                self.interface.back_on_top()
            # Back on Bottom
            elif ch == self.conf.keys['back_on_bottom']:
                self.interface.back_on_bottom()
            # Help
            elif ch == '?':
                self.interface.display_help()

            self.interface.display_timeline()

        else:
            if ch in ('q', 'Q', 'esc'):
                urwid.emit_signal(self, 'help_done')
Example #28
    def save(self, path, filename):
        image = utils.open_image('tmp/crop.png')
        utils.save_image(os.path.join(path, filename), image)

        self.dismiss_popup()
Example #29
    def handleKeyBinding(self):
        '''All key-binding handling should live here.'''
        while True:

            if self.interface.resize_event:
                self.interface.handle_resize_event()
                self.interface.erase_flash_message()
                self.interface.display_timeline()

            ch = self.interface.screen.getch()

            # DOWN
            if ch == self.conf.keys['down'] or ch == curses.KEY_DOWN:
                self.interface.move_down()
            # UP
            elif ch == self.conf.keys['up'] or ch == curses.KEY_UP:
                self.interface.move_up()
            # LEFT
            elif ch == self.conf.keys['left'] or ch == curses.KEY_LEFT:
                self.interface.navigate_buffer(-1)
            # RIGHT
            elif ch == self.conf.keys['right'] or ch == curses.KEY_RIGHT:
                self.interface.navigate_buffer(+1)
            # TWEET
            elif ch == self.conf.keys['tweet']:
                self.api.tweet()
            # RETWEET
            elif ch == self.conf.keys['retweet']:
                self.api.retweet()
            # RETWEET AND EDIT
            elif ch == self.conf.keys['retweet_and_edit']:
                self.api.retweet_and_edit()
            # DELETE TWEET
            elif ch == self.conf.keys['delete']:
                self.api.destroy()
            # MENTIONS
            elif ch == self.conf.keys['mentions']:
                self.interface.change_buffer('mentions')
            # HOME TIMELINE
            elif ch == self.conf.keys['home']:
                self.interface.change_buffer('home')
            # CLEAR
            elif ch == self.conf.keys['clear']:
                self.interface.clear_statuses()
            # UPDATE
            elif ch == self.conf.keys['update']:
                self.api.update_timeline(self.interface.buffer)
            # FOLLOW SELECTED
            elif ch == self.conf.keys['follow_selected']:
                self.api.follow_selected()
            # UNFOLLOW SELECTED
            elif ch == self.conf.keys['unfollow_selected']:
                self.api.unfollow_selected()
            # FOLLOW
            elif ch == self.conf.keys['follow']:
                self.api.follow()
            # UNFOLLOW
            elif ch == self.conf.keys['unfollow']:
                self.api.unfollow()
            # OPENURL
            elif ch == self.conf.keys['openurl']:
                self.interface.openurl()
            # BACK ON TOP
            elif ch == self.conf.keys['back_on_top']:
                self.interface.back_on_top()
            # BACK ON BOTTOM
            elif ch == self.conf.keys['back_on_bottom']:
                self.interface.back_on_bottom()
            # REPLY
            elif ch == self.conf.keys['reply']:
                self.api.reply()
            # GET DIRECT MESSAGE
            elif ch == self.conf.keys['getDM']:
                self.interface.change_buffer('direct')
            # SEND DIRECT MESSAGE
            elif ch == self.conf.keys['sendDM']:
                self.api.direct_message()
            # SEARCH
            elif ch == self.conf.keys['search']:
                self.api.search()
            # SEARCH USER
            elif ch == self.conf.keys['search_user']:
                self.api.find_public_timeline()
            # SEARCH MYSELF
            elif ch == self.conf.keys['search_myself']:
                self.api.my_public_timeline()
            elif ch == self.conf.keys['search_current_user']:
                self.api.find_current_public_timeline()
            # Redraw screen
            elif ch == self.conf.keys['redraw']:
                self.interface.display_redraw_screen()
            # Help
            elif ch == ord('?'):
                Help()
            # Create favorite
            elif ch == self.conf.keys['fav']:
                self.api.set_favorite()
            # Get favorite
            elif ch == self.conf.keys['get_fav']:
                self.api.get_favorites()
            # Destroy favorite
            elif ch == self.conf.keys['delete_fav']:
                self.api.destroy_favorite()
            # Thread
            elif ch == self.conf.keys['thread']:
                self.api.get_thread()
            # Open image
            elif ch == self.conf.keys['open_image']:
                open_image(self.interface.current_status().user)
            # User info
            elif ch == ord('i'):
                self.interface.current_user_info()
            # QUIT
            elif ch == self.conf.keys['quit']:
                break
            else:
                continue

            self.interface.erase_flash_message()
            self.interface.display_timeline()
Example #30
    def image_onPress(self):
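        """Show a popup for the crop under the mouse; after super-resolution
        (sr_bool) reuse tmp/crop.png with a reduced set of action buttons."""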
        if self.sr_bool is False:
            # get mouse coordinates
            x, y = utils.xy_calc(Window.mouse_pos)

            # clamp x, y so the crop window stays inside the image
            x = utils.clamp(x, crop_px, win_x - crop_px)
            y = utils.clamp(y, crop_px, win_y - crop_px)

            # crop around mouse position
            image = utils.open_image('foo.png')
            image = utils.crop_image(image, int(x), int(y), crop_px)
            utils.save_image('tmp/crop_little.png', image)
            image = utils.click_resize_image(image, 8)
            utils.save_image('tmp/crop.png', image)

            # building popup
            box_popup = GridLayout(cols=1)
            box_image = GridLayout(cols=1, size_hint=(1, .8))
            img_widget = Image(id='imagem', source='tmp/crop.png')
            img_widget.reload()
            box_image.add_widget(img_widget)
            box_popup.add_widget(box_image)
            box_control = GridLayout(cols=5, size_hint=(1, .2))
            btn1 = Button(text="Detect Plate", bold=True)
            btn2 = Button(text="SR - Face", bold=True)
            btn3 = Button(text="SR - Text", bold=True)
            btn4 = Button(text="Save Image", bold=True)
            btn5 = Button(text="Close", bold=True)
            btn1.bind(on_press=self.pop_up_get_plate)
            btn2.bind(on_press=self.sr_face)
            btn3.bind(on_press=self.sr_text)
            btn4.bind(on_press=self.show_save)
            btn5.bind(on_press=self.close_popup_crop)
            box_control.add_widget(btn1)
            box_control.add_widget(btn2)
            box_control.add_widget(btn3)
            box_control.add_widget(btn4)
            box_control.add_widget(btn5)
            box_popup.add_widget(box_control)

        else:
            print(self.sr_bool)
            # building popup
            box_popup = GridLayout(cols=1)
            box_image = GridLayout(cols=1, size_hint=(1, .8))
            img_widget = Image(id='imagem', source='tmp/crop.png')
            img_widget.reload()
            box_image.add_widget(img_widget)
            box_popup.add_widget(box_image)
            box_control = GridLayout(cols=3, size_hint=(1, .2))
            btn1 = Button(text="Detect Plate", bold=True)
            btn2 = Button(text="Save Image", bold=True)
            btn3 = Button(text="Close", bold=True)
            btn1.bind(on_press=self.pop_up_get_plate)
            btn2.bind(on_press=self.show_save)
            btn3.bind(on_press=self.close_popup_crop)
            box_control.add_widget(btn1)
            box_control.add_widget(btn2)
            box_control.add_widget(btn3)
            box_popup.add_widget(box_control)

        # creating popup
        self.popup_crop = Popup(
            title='Image', content=box_popup, size=(384, 480))
        self.popup_crop.open()
Example #31
from utils import open_image, separate_packages
from datagram import Datagram

img = open_image('imgs/advice.png')
pkgs = separate_packages(img)

for i in pkgs:
    header_list = [1 for i in range(10)]
    a = Datagram(payload=[], header_list=header_list).get_datagram()
    print(a)
    break