Example #1
def glass():
    pipe.process('glass',
                 skip_frames=200,
                 n_frames=800,
                 resize=.5,
                 tracking_box=[55, 485, 82, 530],
                 warp_mode='t',
                 fps=30)
Example #2
def foosball():
    pipe.process('foosball',
                 skip_frames=566,
                 n_frames=1600,
                 resize=.5,
                 deanimate_mask_path='input/foosball/deanimate_mask.png',
                 warp_mode='h',
                 fps=30)
Example #3
    def work(self, item):
        """Feed jobs from the queue into the pipeline."""
        try:
            data = json.loads(item)
            process(source(self.item_collection, data['id']), self.pipeline)
        except Exception as e:
            import traceback
            logger.error("Problem! " + str(e))
            logger.error(traceback.format_exc())
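Since logging loggers can capture the active exception themselves, the two error() calls above can be folded into a single logger.exception() call. A minimal standalone sketch; the handle name and flat signature are illustrative, and process and source are the same helpers used above:

import json
import logging

logger = logging.getLogger(__name__)

def handle(item, item_collection, pipeline):
    # illustrative standalone variant of work() above
    try:
        data = json.loads(item)
        process(source(item_collection, data['id']), pipeline)
    except Exception:
        # logger.exception logs at ERROR level and appends the active
        # traceback automatically
        logger.exception("Problem feeding item into the pipeline")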
Example #4
    def run(offset=0):
        docs = item_collection.find(search_params, offset=offset)
        for doc in docs['docs']:
            process(lambda: doc, pipeline)

        # recurse until every page of results has been processed
        offset += len(docs['docs'])
        if offset < docs['total']:
            run(offset=offset)
        else:
            print('done')
Example #5
    def run(offset=0):
        docs = item_collection.find(search_params, offset=offset)
        for doc in docs["docs"]:
            process(lambda: doc, pipeline)

        offset += len(docs["docs"])
        # print("Running for " + str(docs['total']) + " docs")
        if offset < docs["total"]:
            run(offset=offset)
        else:
            print("done")
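Both versions above page through results by recursing once per page, so a large enough collection could exhaust Python's default recursion limit (roughly 1,000 frames). An equivalent loop, assuming the same item_collection.find result shape; the run_all name is illustrative:

def run_all():
    # iterative rewrite of run() above
    offset = 0
    while True:
        docs = item_collection.find(search_params, offset=offset)
        for doc in docs["docs"]:
            # the default argument pins the current doc, in case process()
            # calls the thunk later rather than immediately
            process(lambda doc=doc: doc, pipeline)

        offset += len(docs["docs"])
        if not docs["docs"] or offset >= docs["total"]:
            break
    print("done")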
Example #6
def main():
    """Parse stream of requests and insert into MongoDB collection.

    This script will accept input from either stdin or one or more files as
    arguments. Two loggers control logging--one general purpose logger for the
    application and one for logging requests that fail to make it through the
    pipeline. The latter is configured to route different kinds of failures to
    different streams as configured. The failed requests will be logged
    unmodified, as they entered the pipeline, to make later attempts at
    processing easier.

    Failure to send any requests through the pipeline will result in an exit
    status of 1.
    """
    req_buffer = []

    for line in fileinput.input():
        try:
            request = process(line)
        except apachelog.ApacheLogParserError:
            # log unparseable requests
            req_log.error(line.strip(), extra={'err_type': 'REQUEST_ERROR'})
            continue
        except requests.exceptions.RequestException:
            req_log.error(line.strip(), extra={'err_type': 'DSPACE_ERROR'})
            continue
        except Exception as e:
            log.error(e, extra={'inputfile': fileinput.filename(),
                                'inputline': fileinput.filelineno()})
            continue
        if request:
            req_buffer.append(request)
        if len(req_buffer) > 999:
            # flush to MongoDB in batches of 1,000
            insert(collection, req_buffer)
            req_buffer = []
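As excerpted, the loop can end with up to 999 requests still sitting in req_buffer; presumably the full script flushes them once fileinput is exhausted. A one-line sketch of that final flush:

    # after the input loop: insert whatever is left in the buffer
    if req_buffer:
        insert(collection, req_buffer)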
Example #7

    def execute(self, REQUEST=None, RESPONSE=None):
        """Run the deployment pipeline, recording a deployment history."""
        if not self.isActive():
            return

        if getattr(REQUEST, 'reset_date', False):
            self.setResetDate(True)

        histories = self.getDeploymentHistory()
        history = histories.makeHistory()

        Log.attachLogMonitor(history)

        try:
            pipeline = self.getPipeline()
            pipeline.process(self)
        except:
            if not DefaultConfiguration.DEPLOYMENT_DEBUG:
                raise
            # in debug mode, dump the traceback before re-raising
            import sys, pdb, traceback
            ec, e, tb = sys.exc_info()
            print(ec, e)
            traceback.print_tb(tb)
            #pdb.post_mortem(tb)
            raise
        finally:
            Log.detachLogMonitor(history)

        #history.recordStatistics(display)
        histories.attachHistory(history)

        self.getDeploymentPolicy().setResetDate(False)
        # Uncomment if/when you want users with the Manager role to receive
        # email when a deployment completes. Helpful for large sites with
        # long-running deployments.
        #self.exportNotify(REQUEST)

        if RESPONSE:
            return RESPONSE.redirect('manage_workspace')
        return True
Example #8

    def execute(self, REQUEST=None, RESPONSE=None):
        """Run the deployment pipeline, recording a deployment history."""
        if not self.isActive():
            return

        if getattr(REQUEST, 'reset_date', False):
            self.setResetDate(True)

        histories = self.getDeploymentHistory()
        history = histories.makeHistory()

        Log.attachLogMonitor(history)

        try:
            pipeline = self.getPipeline()
            pipeline.process(self)
        except:
            if not DefaultConfiguration.DEPLOYMENT_DEBUG:
                raise
            # in debug mode, dump the traceback before re-raising
            import sys, pdb, traceback
            ec, e, tb = sys.exc_info()
            print(ec, e)
            traceback.print_tb(tb)
            #pdb.post_mortem(tb)
            raise
        finally:
            Log.detachLogMonitor(history)

        #history.recordStatistics(display)
        histories.attachHistory(history)

        self.getDeploymentPolicy().setResetDate(False)

        if RESPONSE:
            return RESPONSE.redirect('manage_workspace')
        return True
Example #9
def calculate():
    """Read the GUI inputs, validate them, and generate the report."""
    directory = directory_entry.get()
    threshold = threshold_entry.get()
    save = save_loc_entry.get()
    include = included_check.get()
    search = subdirs_check.get()

    error_list = check_inputs(directory, save, threshold)

    if error_list:
        error_popup(error_list)
    else:
        result = process(directory, save, threshold, include, search)
        if result == "Success":
            messagebox.showinfo("Success", "Report generated!")
        else:
            messagebox.showerror("Error", result)
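check_inputs is not shown, but the call site implies its contract: it returns a list of human-readable error strings, empty when the inputs are usable. A hypothetical validator consistent with that contract:

import os

def check_inputs(directory, save, threshold):
    # hypothetical implementation; only the return contract is implied above
    errors = []
    if not os.path.isdir(directory):
        errors.append("Scan directory does not exist: " + directory)
    if not os.path.isdir(os.path.dirname(save) or "."):
        errors.append("Save location is not in a valid directory: " + save)
    try:
        float(threshold)
    except ValueError:
        errors.append("Threshold must be a number.")
    return errors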
Example #10
drawer_sr = comicolorization_sr.drawer.Drawer(
    path_result_directory=args.super_resolution_model_directory,
    gpu=args.gpu,
    colorization_class=comicolorization_sr.colorization_task.ComicolorizationTask,
)
drawer_sr.load_model(iteration=args.super_resolution_model_iteration)

# prepare data
image = Image.open(args.input_image).convert('RGB')
with open(args.panel_rectangle) as f:
    rects = json.load(f)
reference_images = [
    Image.open(path).convert('RGB') for path in args.reference_images
]
assert len(rects) == len(reference_images), \
    "need one reference image per panel rectangle"

# prepare the page pipeline
page_pipeline = pipeline.PagePipeline(
    drawer=drawer,
    drawer_sr=drawer_sr,
    image=image,
    reference_images=reference_images,
    threshold_binary=190,
    threshold_line=130,
    panel_rects=rects,
)

# draw
drawn_image = page_pipeline.process()
drawn_image.save(args.output)
Example #11
def run_for_single(item_collection, doc_id):
    pipeline = set_pipeline_steps(item_collection=item_collection)
    process(source(item_collection, doc_id), pipeline)
Example #12
filemenu.add_separator()
filemenu.add_command(label="Exit", command=exit_prog)

helpmenu = Menu(menu)
menu.add_cascade(label="Help", menu=helpmenu)
helpmenu.add_command(label="About...", command=launch_about_window)

if __name__ == "__main__":
    # CLI arguments, in order: read path, write path, threshold,
    # include-inoffensive flag, include-subdirectories flag
    if len(sys.argv) > 1:
        # pass through however many of the five arguments were provided
        process(*sys.argv[1:6])
    else:
        # no path provided: fall back to the GUI
        mainloop()
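With five positional arguments, argparse can express the same dispatch declaratively. A sketch assuming the argument meanings noted above; the cli name and argument names are illustrative, and process and mainloop are the functions from this example:

import argparse

def cli():
    # hypothetical argparse front end for the same process()/mainloop() dispatch
    p = argparse.ArgumentParser(description="Generate a report for a directory")
    p.add_argument("read_path", nargs="?")
    p.add_argument("write_path", nargs="?")
    p.add_argument("threshold", nargs="?")
    p.add_argument("include_inoffensive", nargs="?")
    p.add_argument("include_subdirs", nargs="?")
    args = p.parse_args()

    if args.read_path is None:
        mainloop()  # no arguments: launch the GUI, as above
    else:
        supplied = [args.read_path, args.write_path, args.threshold,
                    args.include_inoffensive, args.include_subdirs]
        process(*[a for a in supplied if a is not None])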
Example #13
import os
import sys

import exporter
import parser  # local module that reads the Takeout JSON export
import pipeline
import sklearn

if __name__ == "__main__":

    if len(sys.argv) == 2:
        filepath = str(sys.argv[1])

        if os.path.isfile(filepath) and filepath.endswith('.json'):
            print("Importing Google Takeout data (this can take a while)... ", end='')
            df = parser.importJson(filepath)
            print("Done!")

            print("Processing trajectories... ", end='')
            data = pipeline.process(df)
            print("Done!")

            print("Exporting to JSON... ", end='')
            json_output = exporter.generateJson(data)
            print("Done!")

            print("Writing to file... ", end='')
            with open("output.json", "w") as f:
                f.write(json_output)
            print("Done!")

        elif not filepath.endswith('.json'):
            print("File must be in JSON format.")