Example #1
def main():
    path = sys.argv[1]
    ip = ImageProcessor(path)
    scrabble_goard = ip.create_board()
    move, board = GameSolver(scrabble_goard, []).calculate_optimal_move()
    print "\n"
    board.pretty_print()
Example #2
def getImage(imageKey):
    imageProcessor = ImageProcessor()
    imageContent = imageProcessor.getImageContent(imageKey)

    if not imageContent:
        return abort(404)
    else:
        response = make_response(imageContent)
        response.headers['Content-Type'] = 'image/jpeg'
        return response
Example #3
    def __init__(self):
        self.isActive = False
        self.location = ""
        self.window = Tk()
        self.window.geometry(f'600x{self.window.winfo_screenheight() - 60}+{self.window.winfo_screenwidth()-600}+0')
        self.window.attributes('-topmost', True)

        # Used to remove the title tabs
        self.window.overrideredirect(1)
        self.create_widgets()
        self.window.withdraw()
        self.imageProcessor = ImageProcessor(self.get_dimensions())
Example #4
def process_image(pic_size):
    # ImageProcessor example
    path = "C:/Users/Jp/Desktop/Gestos/ADIOS/1"  # path to a folder with the images that will make up the strip
    aug = ImageDataGenerator(zoom_range=0.15,
                             shear_range=0.10,
                             fill_mode="nearest")
    image_processor = ImageProcessor(pic_size, path)
    #image_processor.rescale_images_from()
    #path += "/rescaled"
    #image_processor.path = path
    #image_processor.augment_images_from(aug, 10)
    #image_processor.blurred_edge_images_from(k_h=1, k_w=1)
    number_copies = 10
    image_processor.get_strip_from(
        image_filter=image_processor.EDGES,  # image_filter=image_processor.BLURRY_EDGES can also be used; it blurs more
        strip_length=5,  # length of the strip; I always worked with a length of 5
        aug=(aug, number_copies))  # injection for data augmentation: (object, number of copies)
Example #5
class TkWindow:
    def __init__(self):
        self.isActive = False
        self.location = ""
        self.window = Tk()
        self.window.geometry(f'600x{self.window.winfo_screenheight() - 60}+{self.window.winfo_screenwidth()-600}+0')
        self.window.attributes('-topmost', True)

        # Used to remove the title tabs
        self.window.overrideredirect(1)
        self.create_widgets()
        self.window.withdraw()
        self.imageProcessor = ImageProcessor(self.get_dimensions())
    
    def create_widgets(self):
        self.text = Label(self.window, text = self.location)
        self.text.pack()
        
        self.fig = plt.Figure(figsize=(6, 6))
        self.a = self.fig.add_subplot(111)

        self.canvas = FigureCanvasTkAgg(self.fig, master=self.window)
        self.canvas.get_tk_widget().pack()

    def get_dimensions(self):
        return {
            'height': self.window.winfo_screenheight(),
            'width': self.window.winfo_screenwidth()
        }

    def update_location(self):
        self.location = self.imageProcessor.process_screenshot('location')
        if len(self.location) != 0:
            self.text["text"] = self.location['UniqueName']
            self.window.after(400, self.update_location)
            ablionMap = self.getResponse(f"https://www.albiononline2d.com/en/map/api/nodes/{self.location['Index']}")['resourceNodes']
            self.a.scatter(ablionMap["origX"], ablionMap["origY"], color='red')
            self.canvas.draw()

    def getResponse(self, url):
        # Requires `import json` at module level; the response is parsed as JSON
        # because callers index it by key (e.g. ['resourceNodes']).
        request = urllib.request.urlopen(url)
        if request.getcode() == 200:
            return json.loads(request.read())
        return None
        
    def start_loop(self):
        self.update_location()
        self.window.mainloop()
        
    def toggle_visibility(self):
        self.isActive = not self.isActive
        self.window.deiconify() if self.isActive else self.window.withdraw()

    def destroy(self):
        self.window.destroy()
Example #6
    def run(self, feed):
        """

        :param feed:
        :return:
        """
        if not hasattr(feed, 'getNewEpisodes'):
            return None

        self.nprocs = cpu_count()
        self.image_processor = ImageProcessor(self.config)

        new_episodes = feed.getNewEpisodes()
        for episode in new_episodes:
            output = "output/" + self.slugify(episode.title) + ".mp4"

            if not os.path.isfile(output):
                print "Audio Link: " + episode.link
                audio_clip = mpe.AudioFileClip(self.download(episode.link))
                self.image_processor.make_image1(episode)

                self.createMovie(episode=episode,
                                 audioClip=audio_clip,
                                 output=output)
Example #7
class VideoCreator:

    nprocs = 4

    # These accessors back onto underscore-prefixed attributes; returning the
    # property name itself (e.g. self.image_processor) would recurse forever.
    @property
    def image_processor(self):
        return self._image_processor

    @image_processor.setter
    def image_processor(self, value):
        self._image_processor = value

    @property
    def thread_count(self):
        return self._thread_count

    @thread_count.setter
    def thread_count(self, value):
        self._thread_count = value

    @property
    def feed(self):
        return self._feed

    @feed.setter
    def feed(self, value):
        self._feed = value

    @property
    def backgroundImage(self):
        return self._backgroundImage

    @backgroundImage.setter
    def backgroundImage(self, value):
        self._backgroundImage = value

    def run(self, feed):
        """

        :param feed:
        :return:
        """
        if not hasattr(feed, 'getNewEpisodes'):
            return None

        self.nprocs = cpu_count()
        self.image_processor = ImageProcessor(self.config)

        new_episodes = feed.getNewEpisodes()
        for episode in new_episodes:
            output = "output/" + self.slugify(episode.title) + ".mp4"

            if not os.path.isfile(output):
                print "Audio Link: " + episode.link
                audio_clip = mpe.AudioFileClip(self.download(episode.link))
                self.image_processor.make_image1(episode)

                self.createMovie(episode=episode,
                                 audioClip=audio_clip,
                                 output=output)

            #YtUploader(self.config).upload(output, episode)

    def get_chapter_duration(self, chapters, full_duration=None, idx=None):
        """

        :param chapters:
        :param full_duration:
        :param idx:
        :return:
        """
        start = cvsecs(chapters[idx].start)
        if idx is 0:
            start = 0
        try:
            chapter_end_time = chapters[idx + 1].start
        except IndexError:
            chapter_end_time = full_duration

        chapter_end_time = cvsecs(chapter_end_time)
        duration = chapter_end_time - start

        return duration

    def createMovie(self, episode=None, audioClip=None, output=None):
        """

        :param episode:
        :param audioClip:
        :param output:
        :return:
        """
        print " Creating Clips ..."
        if episode is None or audioClip is None:
            return None
        full_duration = audioClip.duration

        if self.config.test:
            full_duration = float(10)

        clips = []

        for idx, chapter in enumerate(episode.chapters):
            clips.append(
                self.createClip(
                    chapter,
                    self.get_chapter_duration(
                        episode.chapters,
                        idx=idx,
                        full_duration=float(full_duration))))
        if output is None:
            output = "output/" + self.slugify(episode.title) + ".mp4"

        fps = 29.98
        if hasattr(self.config, 'video_fps'):
            fps = self.config.video_fps

        final = mpe.concatenate_videoclips(clips, method="compose")

        if not self.config.test:
            final = final.set_audio(audioClip)

        final.write_videofile(output,
                              threads=self.nprocs,
                              fps=float(fps),
                              temp_audiofile=self.config.temp_path +
                              'temp-audio.mp3',
                              audio_codec=self.config.audio_codec,
                              codec=self.config.video_codec,
                              bitrate=self.config.video_bitrate)
        self.cleanup()

    def cleanup(self):
        """

        :return:
        """
        d = path(self.config.temp_path)
        for file in d.files('*.png'):
            file.remove()
            print "Removed {} file".format(file)
        for file in d.files('*.mp3'):
            file.remove()
            print "Removed {}".format(file)

    def createClip(self, chapter, duration=10):
        """
        Creates small clips based on the chapter images.

        :param chapter:
        :param duration:
        :return:
        """
        filename = md5(chapter.title.encode('utf-8')).hexdigest()
        img = self.config.temp_path + filename + ".png"
        if not os.path.isfile(img):
            print("File not found")
            return None
        clip = mpe.ImageClip(img, duration=duration)
        clip.duration = duration
        return clip

    def download(self, link):
        file_name = self.config.temp_path + os.path.basename(link)
        if os.path.isfile(file_name):
            return file_name
        with open(file_name, "wb") as f:
            print "Downloading %s" % file_name
            response = requests.get(link, stream=True)
            total_length = response.headers.get('content-length')

            if total_length is None:  # no content length header
                f.write(response.content)
            else:
                dl = 0
                total_length = int(total_length)
                for data in response.iter_content(chunk_size=4096):
                    dl += len(data)
                    f.write(data)
                    done = int(50 * dl / total_length)
                    sys.stdout.write("\r[%s%s]" % ('=' * done, ' ' *
                                                   (50 - done)))
                    sys.stdout.flush()
        return file_name

    def slugify(self, value):
        """
        Normalizes string, converts to lowercase, removes non-alpha characters,
        and converts spaces to hyphens.
        """
        import unicodedata
        value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
        value = unicode(re.sub('[^\w\s-]', '', value).strip().lower())
        value = unicode(re.sub('[-\s]+', '-', value))
        return value

    def __init__(self, config):
        self.config = config
Example #8
def processBatchEncloser(data):
    global nbThread
    imP = ImageProcessor(zipsearch, nbThread)
    return imP.processBatch(data)
Example #9
import logging
import requests
import urlparse

from lxml.etree import XMLSyntaxError
import lxml.html as lh
from lxml.html.clean import Cleaner

from imageProcessor import ImageProcessor

logger = logging.getLogger('htmlProcessor')
__imageProcessor = ImageProcessor()


def _isAbsolute(url):
    return bool(urlparse.urlparse(url).netloc)


def _getUrlDomain(url):
    return urlparse.urlparse(url).hostname.lower()


def _addHttpToUrlIfNeeded(url):
    if not bool(urlparse.urlparse(url).scheme):
        return "http:" + url
    else:
        return url


def _getCompleteUrl(url, baseUrl):
    if not baseUrl:
Example #10
        zero[:, :, 3] = array[:, :, 3]
        return zero

    def to_red(self, array):
        zero = np.zeros(array.shape)
        zero[:, :, 0] = array[:, :, 0]
        zero[:, :, 3] = array[:, :, 3]
        return zero
    
    def celluloid(self, array):
        pass

    def to_grayscale(self, array, filter='weighted'):
        if filter == 'weighted' or filter == 'w':
            # Standard luma weights: 0.299 R + 0.587 G + 0.114 B
            zero = np.zeros(array.shape)
            gray = array[:, :, 0] * 0.299 + array[:, :, 1] * 0.587 + array[:, :, 2] * 0.114
            zero[:, :, 0] = gray
            zero[:, :, 1] = gray
            zero[:, :, 2] = gray
            zero[:, :, 3] = array[:, :, 3]
            return zero
        return None

if __name__ == '__main__':
    ip = ImageProcessor()
    cf = ColorFilter()
    img = ip.load('../ex01/42.png')
    #N_img = cf.invert(img)
    #ip.display(N_img)
    b_img = cf.to_blue(img)
    g_img = cf.to_grayscale(img, 'w')
    ip.display(g_img)
Example #11
from imageProcessor import ImageProcessor
#import matplotlib.pyplot as plt

path = open("/Users/tbigot/POOL_py/day03/resources/42AI.png", "r")

r = ImageProcessor()

img = r.load("../resources/42AI.png")
r.display(img)
print(img)

#print(path)
#print(r.load)
Example #12
if is_raspberry_pi():
    from pyqtpicam import PiVideoStream
    from autoFocus import AutoFocus
    from voiceCoil import VoiceCoil
    from heater import Heater
else:
    from videoStreamer import VideoStream

WIDTH = 640  # 3280 # 1648 # 1664 # 640 (1920 16:9)
HEIGHT = 480  # 2464 # 1232 # 928 # 480 (1088 16:9)
'''
main application
'''
app = QApplication([])
window = MainWindow()
processor = ImageProcessor()
statsComputer = StatsComputer()
tracker = CentroidTracker()

# Instantiate objects
if is_raspberry_pi():
    vs = PiVideoStream(resolution=(WIDTH, HEIGHT),
                       monochrome=True,
                       framerate=FRAME_RATE,
                       effect='blur',
                       use_video_port=USE_VIDEO_PORT)
    pio = pigpio.pi()
    vc = VoiceCoil(pio)
    af = None
    heater = Heater(pio, 2000)
else:
Example #13
        array_slice = np.asarray(
            tuple(i for i in range(0, array.shape[axis], n)))
        array = np.delete(array, array_slice, axis)
        return array

    def juxtapose(self, array, n, axis):
        seq = []
        for i in range(n):
            seq.append(array)
        array = np.concatenate(tuple(seq), axis)
        return array

    def mosaic(self, array, dimension):
        array = self.juxtapose(array, dimension[0], 0)
        array = self.juxtapose(array, dimension[1], 1)
        return array


if __name__ == '__main__':
    ip = ImageProcessor()
    img_array = ip.load('../ex01/42.png')
    sb = ScrapBooker()
    img_croped = sb.crop(img_array, (50, 50))
    img_thin = sb.thin(img_array, 2, 0)
    img_conc = sb.juxtapose(img_array, 2, 0)
    img_mos = sb.mosaic(img_array, (3, 2))
    ip.display(img_mos)
    #ip.display(img_conc)
    #ip.display(img_croped)
    #ip.display(img_thin)
Example #14
from autoFocus import AutoFocus
from checkWiFi import CheckWiFi

if __name__ == "__main__":

    settings = QSettings("settings.ini", QSettings.IniFormat)

    # Create event loop and instantiate objects
    app = QApplication(sys.argv)
    mw = MainWindow()
    lw = LogWindow()
    vs = PiVideoStream()
    ph = PrintHat()  # Connect to printhat virtual port (this links the klipper software as well)
    st = SystemTemperatures(interval=10, alarm_temperature=55)
    ip = ImageProcessor()
    bp = BatchProcessor()
    af = AutoFocus(display=True)
    cf = CheckWiFi()

    # Connect logging signals
    bp.setLogFileName.connect(lw.setLogFileName)
    vs.postMessage.connect(lw.append)
    ph.signals.message.connect(lw.append)
    ph.signals.error.connect(lambda s: "error;{}".format(lw.append(s[2])))
    st.signals.message.connect(lw.append)
    st.signals.error.connect(lambda s: "error;{}".format(lw.append(s[2])))
    ip.signals.message.connect(lw.append)
    ip.signals.error.connect(lambda s: "error;{}".format(lw.append(s[2])))
    bp.signals.message.connect(lw.append)
    bp.signals.error.connect(lambda s: "error;{}".format(lw.append(s[2])))
Example #15
    def createImageProcessor(self, verticalLowerBound, horizontalLowerBound, cancellationToken, target, threadID):
        imgProc = ImageProcessor.ImageProcessor(verticalLowerBound, horizontalLowerBound, cancellationToken, target,
                                                threadID)
        imgProc.findObjectCoordinates()