def __init__(self, app, undock_action=None):
    """Viewer widget: builds the UI and optionally starts undocked.

    :param app: the application instance (provides settings and system)
    :param undock_action: optional Gtk action toggling docked/undocked state

    NOTE(review): reformatted from whitespace-mangled source; tokens unchanged.
    """
    gtk.VBox.__init__(self)
    self.set_border_width(SPACING)
    self.app = app
    self.settings = app.settings
    self.system = app.system
    Loggable.__init__(self)
    self.log("New PitiviViewer")
    self.pipeline = None
    self._tmp_pipeline = None  # Used for displaying a preview when trimming
    self.sink = None
    self.docked = True
    # Only used for restoring the pipeline position after a live clip trim preview:
    self._oldTimelinePos = None
    self._haveUI = False
    self._createUi()
    self.target = self.internal
    self.undock_action = undock_action
    if undock_action:
        self.undock_action.connect("activate", self._toggleDocked)
        # Honor the persisted docking preference on startup.
        if not self.settings.viewerDocked:
            self.undock()
def __init__(self, train_config, data_model_wrapper, language_config,
             output_dir, validation_model, plot_model, cont_model):
    """Trainer setup: wires configs, the train data model, and optional
    validation/plot/continuation helpers.

    NOTE(review): reformatted from whitespace-mangled source; tokens unchanged.
    """
    Loggable.__init__(self)
    self.train_config = train_config
    self.langs = language_config.langs
    # Log the (index, language) mapping so later per-index output is readable.
    self.logger.info('Language order: {0}'.format(
        [(i, l) for i, l in enumerate(self.langs)]))
    # Train setup
    self.do_train = strings.TRAIN in data_model_wrapper.data_models.keys()
    if self.do_train:
        self.output_dir = os.path.join(output_dir,
                                       strings.TRAIN_OUTPUT_FOLDER_NAME)
        self.dim = data_model_wrapper.dim
        # Word pairs
        self.train_data_model = data_model_wrapper.data_models[strings.TRAIN]
    # Valid setup
    self.do_valid = validation_model is not None
    if self.do_valid:
        # Validation model
        self.validation_model = validation_model
        self.validation_model.set_datamodel(
            data_model_wrapper.data_models[strings.VALID])
    else:
        self.logger.info(
            'Validation will be skipped !!! - no validation process is required'
        )
    self.plot_model = plot_model
    self.cont_model = cont_model
def __init__(self, browser, sample_composition_graph, template_graph):
    """Initialize with a browser, a sample-composition graph, and a
    private copy of the template graph."""
    self.browser = browser
    self.sample_composition = sample_composition_graph
    # Copy so local mutations never leak back into the caller's graph.
    graph_copy = template_graph.copy()
    self.template_graph = graph_copy
    self.log = Loggable(self)
    # Unique instance id drawn from the class-level counter.
    self.gid = next(self.counter)
    # No solution computed yet.
    self.solution = None
def __init__(self, instance, hadj):
    """Timeline scale ruler: draws time marks, tracking a horizontal
    adjustment for scrolling.

    NOTE(review): reformatted from whitespace-mangled source; tokens unchanged.
    """
    Gtk.DrawingArea.__init__(self)
    Zoomable.__init__(self)
    Loggable.__init__(self)
    self.log("Creating new ScaleRuler")
    self.app = instance
    self._seeker = Seeker()
    self.hadj = hadj
    hadj.connect("value-changed", self._hadjValueChangedCb)
    self.add_events(Gdk.EventMask.POINTER_MOTION_MASK |
                    Gdk.EventMask.BUTTON_PRESS_MASK |
                    Gdk.EventMask.BUTTON_RELEASE_MASK |
                    Gdk.EventMask.SCROLL_MASK)
    self.pixbuf = None
    # all values are in pixels
    self.pixbuf_offset = 0
    self.pixbuf_offset_painted = 0
    # This is the number of width we allocate for the pixbuf
    self.pixbuf_multiples = 4
    self.position = 0  # In nanoseconds
    self.pressed = False
    self.need_update = True
    self.min_frame_spacing = 5.0
    self.frame_height = 5.0
    # NOTE(review): `1 / 1` is a float (1.0) on Python 3; confirm
    # Gst.Fraction is meant to receive that rather than Fraction(1, 1).
    self.frame_rate = Gst.Fraction(1 / 1)
    self.ns_per_frame = float(1 / self.frame_rate) * Gst.SECOND
    self.connect('draw', self.drawCb)
    self.connect('configure-event', self.configureEventCb)
    self.callback_id = None
def __init__(self, name, test_manager, tests=None):
    """Named group of tests, indexed by test classname.

    :param name: the group name
    :param test_manager: the owning tests manager
    :param tests: optional iterable of test objects (each with a
        ``classname`` attribute)

    Bug fix: the original signature used a mutable default (``tests=[]``),
    which is shared across calls — a classic Python pitfall. ``None`` is
    used as the sentinel instead; behavior for all existing callers is
    unchanged.
    """
    Loggable.__init__(self)
    self.name = name
    self.test_manager = test_manager
    # Map classname -> test for O(1) lookup.
    self._tests = {}
    for test in (tests or []):
        self._tests[test.classname] = test
def __init__(self, options):
    """Reporter state: run options, start time, per-status counters, results."""
    Loggable.__init__(self)
    self.options = options
    self._start_time = 0
    # One counter per final test status, all starting at zero.
    self.stats = dict.fromkeys(('timeout', 'failures', 'passed', 'skipped'), 0)
    self.results = []
def test_update(self, capsys):
    """A progress tracker accepts successive updates and a clean exit."""
    log = Loggable("loggable_test")
    log.set_level("INFO")
    tracker = log.track("INFO", total=100).enter()
    for pct in (10, 20):
        tracker.update(pct, "{}% there!".format(pct))
    tracker.exit()
def __init__(self):
    """Undo/redo manager: empty stacks plus an initial snapshot checkpoint."""
    Loggable.__init__(self)
    self.undo_stacks = []
    self.redo_stacks = []
    self.stacks = []
    self.running = False
    # Baseline snapshot taken at construction time.
    self._checkpoint = self._takeSnapshot()
def __init__(self, xml_path):
    """Parse a media-description XML file and verify required attributes."""
    Loggable.__init__(self)
    self._xml_path = xml_path
    self.media_xml = ET.parse(xml_path).getroot()
    # Sanity checks: fail fast (KeyError) on missing mandatory attributes.
    for required in ("duration", "seekable"):
        self.media_xml.attrib[required]
def __init__(self):
    """Play/pause toggle button displaying a stock icon image."""
    Gtk.Button.__init__(self)
    Loggable.__init__(self)
    image = Gtk.Image()
    self.image = image
    self.add(image)
    self.playing = False
    self.setPlay()  # start out showing the "play" icon
    self.connect('clicked', self._clickedCb)
def __new__(cls, *args, **kwargs):
    """Singleton constructor: create the one instance lazily, then reuse it.

    Bug fix: the original forwarded ``*args, **kwargs`` to
    ``super().__new__``, but ``object.__new__`` accepts no extra arguments
    and raises TypeError on Python 3 whenever any are supplied. Only
    ``cls`` is passed now; ``__init__`` still receives the arguments.
    """
    if not cls._instance:
        cls._instance = super(ScenarioManager, cls).__new__(cls)
        cls._instance.config = None
        cls._instance.discovered = False
        Loggable.__init__(cls._instance)
    return cls._instance
def __init__(self):
    """Runner state: discovers testers, holds tests and run configuration."""
    Loggable.__init__(self)
    self.options = None
    self.testers = []
    self.tests = []
    self.reporter = None
    self._list_testers()  # populate self.testers
    self.wanted_tests_patterns = []
def __init__(self):
    """Tests manager: wanted/unwanted test sets and name filter patterns."""
    Loggable.__init__(self)
    self.tests = set()
    self.unwanted_tests = set()
    self.options = None
    self.args = None
    self.reporter = None
    self.wanted_tests_patterns = []
    self.blacklisted_tests_patterns = []
def test_prefix(self, capsys):
    """The timeit prefix shows up in the captured log output."""
    logger = Loggable("loggable_test")
    logger.set_level("INFO")
    timed = logger.timeit(logging.INFO, "prefix")
    timed.enter()
    timed.info("Some information")
    timed.exit()
    captured, _ = capsys.readouterr()
    print(captured)
    assert "prefix" in captured
def __init__(self, options):
    """Store run options and reset timing, results, and outcome counters."""
    Loggable.__init__(self)
    self.options = options
    self._start_time = 0
    self.results = []
    # Counters for each possible test outcome.
    self.stats = {
        'timeout': 0,
        'failures': 0,
        'passed': 0,
        'skipped': 0,
    }
def test_spawn(self, capsys):
    """A timeit logger emits started/body/finished messages."""
    parent = Loggable("loggable_test")
    parent.set_level("INFO")
    child = parent.timeit(logging.INFO)
    child.enter()
    child.info("log2")
    child.exit()
    captured, _ = capsys.readouterr()
    lowered = captured.lower()
    assert "started" in lowered
    assert "log2" in captured
    assert "finished" in lowered
def __init__(self, options):
    """Reporter: current test, output handle, options, counters, results."""
    Loggable.__init__(self)
    self._current_test = None
    self.out = None
    self.options = options
    self.results = []
    # Per-status counters, all starting at zero.
    self.stats = dict.fromkeys(('timeout', 'failures', 'passed', 'skipped'), 0)
def __init__(self, pipeline, video_overlay):
    """Wrap a GStreamer pipeline: watch its bus and track position/duration.

    NOTE(review): reformatted from whitespace-mangled source; tokens unchanged.
    """
    Loggable.__init__(self)
    Signallable.__init__(self)
    self._pipeline = pipeline
    self._bus = self._pipeline.get_bus()
    self._bus.add_signal_watch()
    self._bus.connect("message", self._busMessageCb)
    self._listening = False  # for the position handler
    self._listeningInterval = 300  # default 300ms
    self._listeningSigId = 0
    self._duration = Gst.CLOCK_TIME_NONE
    self.video_overlay = video_overlay
def stepFrame(self, framerate, frames_offset):
    """
    Seek backwards or forwards a certain amount of frames (frames_offset).
    This clamps the playhead to the project frames.
    """
    # NOTE(review): `long` exists only on Python 2; this block predates
    # Python 3 and is kept byte-identical.
    cur_frame = int(round(self.getPosition() * framerate.num /
                          float(Gst.SECOND * framerate.denom), 2))
    new_frame = cur_frame + frames_offset
    new_pos = long(new_frame * Gst.SECOND * framerate.denom / framerate.num)
    Loggable.info(self, "From frame %d to %d at %f fps, seek to %s s" %
                  (cur_frame, new_frame,
                   framerate.num / framerate.denom,
                   new_pos / float(Gst.SECOND)))
    self.simple_seek(new_pos)
def __init__(self, timeout=80):
    """Debounced seek helper.

    @param timeout (optional): the amount of milliseconds for a seek attempt
    """
    Signallable.__init__(self)
    Loggable.__init__(self)
    self.timeout = timeout
    self.pending_seek_id = None
    self.position = None
    self.format = None
    self._time = None
def test_does_not_log(self, capsys):
    """Nothing is emitted when the timer's level is below the logger's."""
    quiet = Loggable("loggable_test")
    quiet.set_level("ERROR")
    timer = quiet.timeit(logging.INFO)
    timer.enter()
    timer.info("ok")
    time.sleep(0.1)
    timer.exit()
    captured, _ = capsys.readouterr()
    print(captured)
    assert not captured
def __init__(self, settings=None):
    """Transformation/preview drawing area; every handle starts empty."""
    Gtk.DrawingArea.__init__(self)
    Loggable.__init__(self)
    self.seeker = Seeker()
    self.settings = settings
    # Selection / rendering state, filled in lazily.
    self.box = None
    self.area = None
    self.stored = False
    self.zoom = 1.0
    # GStreamer-side handles.
    self.sink = None
    self.pixbuf = None
    self.pipeline = None
    self.transformation_properties = None
def __setstate__(self, state):
    """Restore pickled state.

    NOTE(review): `browser` is deliberately NOT restored (see the
    commented-out line); the caller must re-attach it. A fresh Loggable is
    created because the logger does not survive pickling. `_plans` is
    reset to empty rather than restored.
    """
    self._edge_counter = state["_edge_counter"]
    self._node_counter = state["_node_counter"]
    self.edges = state["edges"]
    self._plans = []
    self._plan_ids = state["plan_ids"]
    self.is_cached = state["is_cached"]
    self._weights = state["_weights"]
    self.updated_at = state["updated_at"]
    self.created_at = state["created_at"]
    # self.browser = state['browser']
    self.log = Loggable(self)
def __init__(self, data_wrapper_config, embedding_config, language_config):
    """Build one DataModel per configured dataset, sharing a single
    embedding model wrapper across all of them."""
    Loggable.__init__(self)
    self.data_models = dict()
    # One embedding wrapper shared by every data model below.
    embedding_model_wrapper = EmbeddingModelWrapper(
        language_config=language_config, embedding_config=embedding_config)
    for (key, data_model_config) in data_wrapper_config.data_configs.items():
        self.logger.info('Creating data model for {} ...'.format(
            key.upper()))
        self.data_models[key] = DataModel(
            language_config=language_config,
            data_model_config=data_model_config,
            embedding_model_wrapper=embedding_model_wrapper)
    # Embedding dimensionality, common to all data models.
    self.dim = embedding_model_wrapper.get_dim()
def __init__(self, settings=None):
    """Viewer drawing area (GTK2 variant); paints its background black.

    NOTE(review): reformatted from whitespace-mangled source; tokens unchanged.
    """
    gtk.DrawingArea.__init__(self)
    Loggable.__init__(self)
    self.seeker = Seeker()
    self.settings = settings
    self.box = None
    self.stored = False
    self.area = None
    self.zoom = 1.0
    self.sink = None
    self.pixbuf = None
    self.pipeline = None
    self.transformation_properties = None
    # Black background in every widget state (NORMAL .. INSENSITIVE).
    for state in range(gtk.STATE_INSENSITIVE + 1):
        self.modify_bg(state, self.style.black)
def __init__(self, language_config, data_model_config, embedding_model_wrapper): Loggable.__init__(self) # Configs self.language_config = language_config self.data_model_config = data_model_config if self.data_model_config.emb is not None: own_embedding_model_wrapper = EmbeddingModelWrapper( language_config=language_config, embedding_config=self.data_model_config.embedding_config) self.embeddings = own_embedding_model_wrapper.embeddings else: self.embeddings = embedding_model_wrapper.embeddings # { (l1, l2) : [(w1, w2)] } self.word_pairs_dict = self._get_word_pairs_dict(self.embeddings)
def test_pickle():
    """A Loggable survives a pickle round-trip with its name intact."""
    original = Loggable("Parent")
    restored = pickle.loads(pickle.dumps(original))
    assert original.name == "Parent"
def __init__(self, pipeline):
    """Wrap a pipeline, watching its bus with a synchronous xid handler.

    NOTE(review): reformatted from whitespace-mangled source; tokens unchanged.
    """
    Loggable.__init__(self)
    Signallable.__init__(self)
    self._pipeline = pipeline
    self._bus = self._pipeline.get_bus()
    self._bus.add_signal_watch()
    self._bus.connect("message", self._busMessageCb)
    # Initially, we set a synchronous bus message handler so that the xid
    # is known right away and we can set the viewer synchronously, avoiding
    # the creation of an external window.
    # Afterwards, the xid-message is handled async (to avoid deadlocks).
    self._bus.set_sync_handler(self._busSyncMessageHandler)
    self._has_sync_bus_handler = True
    self._listening = False  # for the position handler
    self._listeningInterval = 300  # default 300ms
    self._listeningSigId = 0
    self._duration = gst.CLOCK_TIME_NONE
def __init__(self, model_config, language_config, output_dir, type):
    """Validation/test model: similarity counters plus an output folder.

    NOTE(review): `type` shadows the builtin but is kept to preserve the
    public signature. os.makedirs raises if the folder already exists —
    presumably intentional (each run gets a fresh folder); confirm.
    """
    Loggable.__init__(self)
    self.model_config = model_config
    self.langs = language_config.langs
    # Counters
    self.sim_lang_wise = dict()
    self.sim_cumm = []
    self.precs_lang_wise = dict()
    # Output folder
    folder_name = ''
    if type == strings.VALID:
        folder_name = strings.VALID_OUTPUT_FOLDER_NAME
    elif type == strings.TEST:
        folder_name = strings.TEST_OUTPUT_FOLDER_NAME
    self.output_dir = os.path.join(output_dir, folder_name)
    os.makedirs(self.output_dir)
def test_does_log(self, level, level_as_str, capsys):
    """Messages emitted at the configured level carry the level name."""
    logger = Loggable("test")
    emit = getattr(logger, level.lower())
    expected_level = level
    if not level_as_str:
        level = getattr(logging, level.upper())
    # check log
    logger.set_level(level)
    message = str(uuid4())
    emit(message)
    captured, _ = capsys.readouterr()
    print(captured)
    assert message in captured
    assert expected_level.upper() in captured
def __init__(self, **unused_kw):
    """Default project export settings: video, audio, and container."""
    Loggable.__init__(self)
    # Video defaults.
    self.videowidth = 720
    self.videoheight = 576
    self.render_scale = 100
    self.videorate = gst.Fraction(25, 1)
    self.videopar = gst.Fraction(16, 15)
    # Audio defaults.
    self.audiochannels = 2
    self.audiorate = 44100
    self.audiodepth = 16
    # Encoders and muxer.
    self.vencoder = None
    self.aencoder = None
    self.muxer = "oggmux"
    # Per-element settings caches.
    self._containersettings_cache = {}  # muxer -> containersettings
    self._vcodecsettings_cache = {}  # vencoder -> vcodecsettings
    self._acodecsettings_cache = {}  # aencoder -> acodecsettings
def __init__(self, cost: float, paths: list, graph: BrowserGraph, item=None):
    """Solution record: total cost, chosen paths, the solved graph, and an
    optional existing item."""
    self.cost = cost
    self.paths = paths
    self.graph = graph
    self.item = item
    self.log = Loggable(self)
def __init__(self, browser, plans=None, name=None):
    """Auto-planner bound to a browser session, optionally seeded with plans."""
    self.browser = browser
    self.log = Loggable(
        self, "AutoPlanner@{url}".format(url=self.browser.session.url)
    )
    if plans:
        # Seed plans and warm the browser cache with them.
        self.plans = plans
        self.browser.update_cache(plans)
    self.weight_container = EdgeWeightContainer(
        self.browser, self._hash_afts, self._external_aft_hash, plans=plans
    )
    self._template_graph = None
    self.model_filters = {}
    # Default to a unique, id-based name when none is supplied.
    self.name = name if name is not None else "unnamed_{}".format(id(self))
    self._version = __version__
    self.created_at = str(arrow.now())
    self.updated_at = str(arrow.now())
def __init__(self, application_name, classname, options, reporter, duration=0,
             timeout=DEFAULT_TIMEOUT, hard_timeout=None):
    """
    @timeout: The timeout during which the value return by get_current_value
              keeps being exactly equal
    @hard_timeout: Max time the test can take in absolute
    """
    Loggable.__init__(self)
    self.timeout = timeout
    self.hard_timeout = hard_timeout
    self.classname = classname
    self.options = options
    self.application = application_name
    self.command = ""
    self.reporter = reporter
    self.process = None
    self.duration = duration
    # clean() finishes initialization of per-run state (defined elsewhere).
    self.clean()
def test_basic_log(self, capsys):
    """track(...).info(...) emits a line containing the level name."""
    log = Loggable("loggable_test")
    log.set_level("INFO")
    log.track("INFO").info("log2")
    captured, _ = capsys.readouterr()
    assert "INFO" in captured
def __init__(self, classname, options, reporter, combination, uri,
             media_descriptor, timeout=DEFAULT_TIMEOUT, scenario=None):
    """Transcoding test: duration from the media file, protocol-based timeout.

    NOTE(review): `long` exists only on Python 2; block kept byte-identical.
    """
    Loggable.__init__(self)
    # Media duration converted from GStreamer units to seconds.
    file_dur = long(media_descriptor.get_duration()) / GST_SECOND
    try:
        # Per-protocol timeout override, when one is configured.
        timeout = G_V_PROTOCOL_TIMEOUTS[media_descriptor.get_protocol()]
    except KeyError:
        pass  # no override for this protocol; keep the caller's timeout
    super(GstValidateTranscodingTest, self).__init__(
        GST_VALIDATE_TRANSCODING_COMMAND, classname, options, reporter,
        duration=file_dur, timeout=timeout, scenario=scenario)
    self.media_descriptor = media_descriptor
    self.uri = uri
    self.combination = combination
    self.dest_file = ""
def test_log(self, level, capsys):
    """logger.log(msg, level) emits the message at the given level."""
    loggable = Loggable("test")
    payload = str(uuid4())
    loggable.set_level(level)
    loggable.log(payload, level)
    captured, _ = capsys.readouterr()
    print(captured)
    assert payload in captured
def __init__(self, browser, edge_hash, node_hash, plans, plan_ids=None):
    """EdgeCalculator initializer.

    :param browser: the Browser object
    :type browser: Browser
    :param edge_hash: The edge hashing function. Should take exactly 2 arguments.
    :type edge_hash: function
    :param node_hash: The node hashing function. Should take exactly 1 argument.
    :type node_hash: function
    :param plans: optional list of plans
    :type plans: list
    """
    self.browser = browser
    self.log = Loggable(
        self, "EdgeWeightContainer({})".format(self.browser.session)
    )
    # filter only those plans that have operations
    self._plans = []
    if plan_ids is not None:
        self._plan_ids = plan_ids
    else:
        self._plan_ids = []
    if plans is not None:
        self.plans = plans

    def new_edge_hash(pair):
        # Qualify the caller's edge hash with both endpoints' field-type
        # parent ids, so the counter distinguishes edges between parents.
        h = edge_hash(pair)
        return "{}_{}_{}".format(
            pair[0].field_type.parent_id, h, pair[1].field_type.parent_id
        )

    self.edges = []
    self._edge_counter = HashCounter(new_edge_hash)
    self._node_counter = HashCounter(node_hash)
    self._weights = {}
    self.created_at = str(arrow.now())
    self.updated_at = None
    self.is_cached = False
def __init__(self, elementfactory, properties=None):
    """Dialog for editing a GStreamer element's settings.

    :param elementfactory: factory used to instantiate the edited element
    :param properties: optional mapping of initial property values

    Bug fix: the original used a mutable default argument
    (``properties={}``), which is shared between calls, so any mutation
    would leak across dialog instances. ``None`` is the sentinel now;
    behavior for existing callers is unchanged.
    """
    Loggable.__init__(self)
    if properties is None:
        properties = {}
    self.debug("factory:%s, properties:%s", elementfactory, properties)
    self.builder = gtk.Builder()
    self.builder.add_from_file(os.path.join(get_ui_dir(),
                                            "elementsettingsdialog.ui"))
    self.builder.connect_signals(self)
    self.ok_btn = self.builder.get_object("okbutton1")
    self.window = self.builder.get_object("dialog1")
    self.elementsettings = GstElementSettingsWidget()
    self.builder.get_object("viewport1").add(self.elementsettings)
    self.factory = elementfactory
    self.element = self.factory.create("elementsettings")
    if not self.element:
        self.warning("Couldn't create element from factory %s", self.factory)
    self.properties = properties
    self._fillWindow()
    # Try to avoid scrolling, whenever possible.
    screen_height = self.window.get_screen().get_height()
    contents_height = self.elementsettings.size_request()[1]
    maximum_contents_height = max(500, 0.7 * screen_height)
    if contents_height < maximum_contents_height:
        # The height of the content is small enough, disable the scrollbars.
        default_height = -1
        scrolledwindow = self.builder.get_object("scrolledwindow1")
        scrolledwindow.set_policy(gtk.POLICY_NEVER, gtk.POLICY_NEVER)
        scrolledwindow.set_shadow_type(gtk.SHADOW_NONE)
    else:
        # If we need to scroll, set a reasonable height for the window.
        default_height = 600
    self.window.set_default_size(300, default_height)
    self.window.show()
def test_not_enabled(self):
    """Tracker enablement follows the logger's configured level."""
    logger = Loggable("loggable_test")
    logger.set_level("CRITICAL")
    assert not logger.track("INFO").is_enabled()
    assert logger.track("CRITICAL").is_enabled()
def test_no_update(self, capsys):
    """Updates on a disabled tracker are accepted silently."""
    logger = Loggable("loggable_test")
    logger.set_level("CRITICAL")
    assert not logger.is_enabled("INFO")
    progress = logger.track("INFO", total=100).enter()
    for pct in (10, 20):
        progress.update(pct, "{}% there!".format(pct))
    progress.exit()
def test_timeit(self, capsys):
    """timeit emits started/finished lines around the timed body."""
    logger = Loggable("loggable_test")
    logger.set_level("INFO")
    timer = logger.timeit(logging.INFO)
    timer.enter()
    timer.info("ok")
    time.sleep(0.1)
    timer.exit()
    captured, _ = capsys.readouterr()
    print(captured)
    lowered = captured.lower()
    assert "started" in lowered
    assert "ok" in captured
    assert "finished" in lowered
def test_pickle_span():
    """Spawned children survive pickling of the parent, and level changes on
    the parent propagate to children and unpickled copies."""
    logger1 = Loggable("Parent")
    logger1.set_level("INFO")
    logger2 = logger1.spawn("new name")
    # The parent tracks its children; a freshly spawned child has none.
    assert logger1._children
    assert logger2._children == {}
    logger3 = pickle.loads(pickle.dumps(logger1))
    logger4 = pickle.loads(pickle.dumps(logger2))
    assert dict(logger3._children)
    # NOTE(review): identity check — the unpickled parent references the
    # ORIGINAL child object, presumably via a shared registry; confirm.
    assert logger3._children[logger2._id] is logger2
    assert logger4._children == {}
    # Level changes on logger1 are visible from every related logger.
    for lvl in ["ERROR", "DEBUG", "INFO"]:
        logger1.set_level(lvl)
        assert logger1.level_name() == lvl
        assert logger2.level_name() == lvl
        assert logger3.level_name() == lvl
        assert logger4.level_name() == lvl
def test_does_not_log(self, level, capsys):
    """Messages pass at the set level but are suppressed above CRITICAL."""
    logger = Loggable("test")
    emit = getattr(logger, level.lower())
    # does log
    logger.set_level(level)
    message = str(uuid4())
    emit(message)
    out, err = capsys.readouterr()
    # print(out)
    assert message in out
    assert level.upper() in out
    # does not log
    logger.set_level(logging.CRITICAL + 1)
    message = str(uuid4())
    emit(message)
    out, err = capsys.readouterr()
    assert not out
    assert not err
class Foo(object):
    """Minimal example of composing a Loggable rather than inheriting it."""

    def __init__(self):
        self.log = Loggable(self)

    def bar(self):
        """Emit a fixed message through the attached logger."""
        self.log.info("bar")
def __init__(self):
    """Initialize only the Loggable machinery; no state of its own."""
    Loggable.__init__(self)
from loggable import Loggable

# Module-wide logger shared by this package.
logger = Loggable("pyblast")
def __init__(self):
    """Initialize both base classes, the URI list, and the defaults flag."""
    TestsManager.__init__(self)
    Loggable.__init__(self)
    self._uris = []
    self._run_defaults = True
def __init__(self, instance):
    """Preview widget: video/image preview, playback controls, zoom, tag
    display, and error reporting.

    NOTE(review): reformatted from whitespace-mangled source; tokens unchanged.
    """
    gtk.VBox.__init__(self)
    Loggable.__init__(self)
    self.log("Init PreviewWidget")
    self.connect('destroy', self._destroy_cb)
    self.settings = instance.settings
    self.preview_cache = {}
    self.preview_cache_errors = {}
    self.discoverer = gst.pbutils.Discoverer(gst.SECOND)
    #playbin for play pics
    self._unsurePlaybin()
    bus = self.player.get_bus()
    bus.add_signal_watch()
    bus.connect('message', self._bus_message_cb)
    bus.enable_sync_message_emission()
    bus.connect('sync-message::element', self._sync_message_cb)
    bus.connect('message::tag', self._tag_found_cb)
    self.__videosink = self.player.get_property("video-sink")
    self.__fakesink = gst.element_factory_make("fakesink", "fakesink")
    #some global variables for preview handling
    self.is_playing = False
    self.time_format = gst.Format(gst.FORMAT_TIME)
    self.original_dims = (PREVIEW_WIDTH, PREVIEW_HEIGHT)
    self.countinuous_seek = False
    self.current_selected_uri = ""
    self.current_preview_type = ""
    self.description = ""
    self.tags = {}
    # Gui elements:
    # Drawing area for video output
    self.preview_video = ViewerWidget()
    self.preview_video.modify_bg(gtk.STATE_NORMAL,
                                 self.preview_video.style.black)
    self.pack_start(self.preview_video, expand=False)
    # An image for images and audio
    self.preview_image = gtk.Image()
    self.preview_image.set_size_request(self.settings.FCpreviewWidth,
                                        self.settings.FCpreviewHeight)
    self.preview_image.show()
    self.pack_start(self.preview_image, expand=False)
    # Play button
    self.bbox = gtk.HBox()
    self.play_button = gtk.ToolButton(gtk.STOCK_MEDIA_PLAY)
    self.play_button.connect("clicked", self._on_start_stop_clicked_cb)
    self.bbox.pack_start(self.play_button, expand=False)
    #Scale for position handling
    self.pos_adj = gtk.Adjustment()
    self.seeker = gtk.HScale(self.pos_adj)
    self.seeker.set_update_policy(gtk.UPDATE_DISCONTINUOUS)
    self.seeker.connect('button-press-event', self._on_seeker_press_cb)
    self.seeker.connect('button-release-event', self._on_seeker_press_cb)
    self.seeker.connect('motion-notify-event', self._on_motion_notify_cb)
    self.seeker.set_draw_value(False)
    self.seeker.show()
    self.bbox.pack_start(self.seeker)
    # Zoom buttons
    self.b_zoom_in = gtk.ToolButton(gtk.STOCK_ZOOM_IN)
    self.b_zoom_in.connect("clicked", self._on_zoom_clicked_cb, 1)
    self.b_zoom_out = gtk.ToolButton(gtk.STOCK_ZOOM_OUT)
    self.b_zoom_out.connect("clicked", self._on_zoom_clicked_cb, -1)
    self.bbox.pack_start(self.b_zoom_in, expand=False)
    self.bbox.pack_start(self.b_zoom_out, expand=False)
    self.bbox.show_all()
    self.pack_start(self.bbox, expand=False, fill=False)
    # Label for metadata tags
    self.l_tags = gtk.Label()
    self.l_tags.set_justify(gtk.JUSTIFY_LEFT)
    self.l_tags.set_ellipsize(pango.ELLIPSIZE_END)
    self.l_tags.show()
    self.pack_start(self.l_tags, expand=False, fill=False)
    # Error handling
    vbox = gtk.VBox()
    vbox.set_spacing(SPACING)
    self.l_error = gtk.Label(_("PiTiVi can not preview this file."))
    self.b_details = gtk.Button(_("More info"))
    self.b_details.connect('clicked', self._on_b_details_clicked_cb)
    vbox.pack_start(self.l_error)
    vbox.pack_start(self.b_details, expand=False, fill=False)
    vbox.show()
    self.pack_start(vbox, expand=False, fill=False)
def __init__(self, input_dir):
    """Remember the directory this component reads its input from."""
    Loggable.__init__(self)
    self.input_dir = input_dir
def __init__(self):
    """Embedding holder: vectors (syn0) and vocab (index2word) start unset."""
    Loggable.__init__(self)
    self.syn0 = None
    self.index2word = None
def __init__(self, instance):
    """Preview widget (KSE variant): playbin setup, playback controls, and
    preview/metadata/error widgets created but not all packed yet.

    NOTE(review): reformatted from whitespace-mangled source; tokens unchanged.
    """
    gtk.VBox.__init__(self)
    Loggable.__init__(self)
    self.log("Init PreviewWidget")
    self.connect('destroy', self._destroy_cb)
    self.settings = instance.settings
    self.preview_cache = {}
    self.preview_cache_errors = {}
    self.discoverer = gst.pbutils.Discoverer(gst.SECOND)
    #playbin for play pics
    self._unsurePlaybin()
    bus = self.player.get_bus()
    bus.add_signal_watch()
    bus.connect('message', self._bus_message_cb)
    bus.enable_sync_message_emission()
    bus.connect('sync-message::element', self._sync_message_cb)
    bus.connect('message::tag', self._tag_found_cb)
    self.__videosink = self.player.get_property("video-sink")
    self.__fakesink = gst.element_factory_make("fakesink", "fakesink")
    #some global variables for preview handling
    self.is_playing = False
    self.time_format = gst.Format(gst.FORMAT_TIME)
    self.original_dims = (PREVIEW_WIDTH, PREVIEW_HEIGHT)
    self.countinuous_seek = False
    self.current_selected_uri = ""
    self.current_preview_type = ""
    self.description = ""
    self.tags = {}
    # Play button
    self.bbox = gtk.HBox()
    self.play_button = gtk.ToolButton(gtk.STOCK_MEDIA_PLAY)
    self.play_button.connect("clicked", self._on_start_stop_clicked_cb)
    self.bbox.pack_start(self.play_button, expand=False)
    #Scale for position handling
    self.pos_adj = gtk.Adjustment()
    self.seeker = gtk.HScale(self.pos_adj)
    self.seeker.set_update_policy(gtk.UPDATE_DISCONTINUOUS)
    self.seeker.connect('button-press-event', self._on_seeker_press_cb)
    self.seeker.connect('button-release-event', self._on_seeker_press_cb)
    self.seeker.connect('motion-notify-event', self._on_motion_notify_cb)
    self.seeker.set_draw_value(False)
    self.seeker.show()
    self.bbox.pack_start(self.seeker)
    self.pack_start(self.bbox, expand=False, fill=False)
    self.preview_video = ViewerWidget()
    self.preview_image = gtk.Image()
    #Metadata
    self.l_tags = gtk.Label()
    #Error handling
    self.l_error = gtk.Label(_("KSE can not preview this file."))
    self.b_details = gtk.Button(_("More info"))
    self.b_zoom_in = gtk.ToolButton(gtk.STOCK_ZOOM_IN)
    self.b_zoom_in.connect("clicked", self._on_zoom_clicked_cb, 1)
    self.b_zoom_out = gtk.ToolButton(gtk.STOCK_ZOOM_OUT)
    self.b_zoom_out.connect("clicked", self._on_zoom_clicked_cb, -1)
class NetworkOptimizer:
    """Finds an (approximately) optimal Steiner tree through a template
    graph of :class:`AllowableFieldType` nodes, connecting available
    inventory items to a goal sample/object type, and converts the result
    into an Aquarium plan.

    The run proceeds in stages (see :meth:`run`): build per-sample graphs
    from the sample composition (stage 0), prune inconsistent wires
    (stage 1), collect terminal nodes (stage 2), and optimize (stage 3).
    """

    # Class-level id generator shared by all instances; ``counter`` is
    # presumably ``itertools.count`` imported under that name elsewhere in
    # the file — TODO confirm.
    counter = counter()

    def __init__(self, browser, sample_composition_graph, template_graph):
        """
        :param browser: session browser used for all model queries/retrievals
        :param sample_composition_graph: directed graph describing how samples
            are built from other samples
        :param template_graph: weighted AFT connection graph; copied so this
            instance can mutate it safely
        """
        self.browser = browser
        self.sample_composition = sample_composition_graph
        self.template_graph = template_graph.copy()
        self.log = Loggable(self)
        self.gid = next(self.counter)
        # Populated by run(); a NetworkSolution.
        self.solution = None

    def _cinfo(self, msg, foreground="white", background="black"):
        # Colored info logging helper.
        self.log.info(cstring(msg, foreground, background))

    ############################
    # RUN
    ############################

    def run_stage0(self):
        """Stage 0: expand the sample composition and build the composed graph."""
        self._cinfo("STAGE 0: Sample Composition")
        self.update_sample_composition()
        graph = self.create_sample_composition_graphs(self.template_graph,
                                                      self.browser,
                                                      self.sample_composition)
        return graph

    def run_stage1(self, graph):
        """Stage 1: prune inconsistent internal wires (mutates *graph* in place)."""
        self._cinfo("STAGE 1 Cleaning Graph")
        self.clean_graph(graph)

    def run_stage2(self, graph, goal_sample, goal_object_type, ignore):
        """Stage 2: collect start (items + leaf operations) and end nodes.

        :param ignore: node ids to exclude from the start set (None -> []).
        :return: (start_nodes, end_nodes)
        """
        if ignore is None:
            ignore = []
        self._cinfo("STAGE 2: Terminal Nodes")
        start_nodes = self.extract_items(graph)
        start_nodes += self.extract_leaf_operations(graph)
        start_nodes = [n for n in start_nodes if n not in ignore]
        end_nodes = self.extract_end_nodes(graph, goal_sample, goal_object_type)
        return start_nodes, end_nodes

    def run_stage3(self, graph, start_nodes, end_nodes):
        """Stage 3: run the Steiner-tree optimization."""
        self._cinfo("STAGE 3: Optimizing")
        return self.optimize_steiner_tree(start_nodes, end_nodes, graph, [])

    def run(self, goal_object_type, goal_sample=None, ignore=None):
        """Plan a network that produces *goal_sample* in *goal_object_type*.

        Short-circuits with a zero-cost solution if a matching inventory
        item already exists. Raises if the goal sample's type does not match
        the object type, or if the goal sample is missing from the sample
        composition.

        :return: the stored :class:`NetworkSolution`
        """
        if goal_sample is None:
            # Default to the first root of the sample composition.
            goal_sample = self.root_samples()[0]
        existing_item = self.browser.session.Item.one(
            query={
                "sample_id": goal_sample.id,
                "object_type_id": goal_object_type.id
            })
        if existing_item:
            # Nothing to plan — the item is already in inventory.
            print("Existing item found: {}".format(existing_item))
            self.solution = NetworkSolution(cost=0,
                                            paths=[],
                                            graph=None,
                                            item=existing_item)
            return self.solution
        if goal_sample.sample_type_id != goal_object_type.sample_type_id:
            raise Exception(
                "ObjectType {ot} does not match Sample {s}. '{s}' is a {st} but "
                "ObjectType '{ot}' refers to "
                "a '{otst}'".format(
                    ot=goal_object_type.name,
                    s=goal_sample.name,
                    st=goal_sample.sample_type.name,
                    otst=goal_object_type.sample_type.name,
                ))

        ############################
        # Stage 0
        ############################
        graph = self.run_stage0()
        if goal_sample.id not in self.sample_composition:
            raise Exception(
                "Sample id={} not found in sample composition".format(
                    goal_sample.id))

        ############################
        # Stage 1
        ############################
        self.run_stage1(graph)

        ############################
        # Stage 2
        ############################
        start_nodes, end_nodes = self.run_stage2(graph, goal_sample,
                                                 goal_object_type, ignore)

        ############################
        # Stage 3
        ############################
        cost, paths, visited_samples = self.run_stage3(graph, start_nodes,
                                                       end_nodes)
        self.solution = NetworkSolution(cost=cost, paths=paths, graph=graph)
        return self.solution

    ############################
    # PLAN
    ############################

    def plan(self, canvas=None, solution=None) -> Planner:
        """Converts a path through a :class:`BrowserGraph` into an Aquarium Plan.

        :param canvas: Planner instance (a fresh one is created when None)
        :param solution: NetworkSolution to realize (defaults to self.solution)
        :return: the Planner with operations, field values and items assigned
        """
        if canvas is None:
            canvas = Planner(self.browser.session)
            canvas.plan.operations = []
        if solution is None:
            solution = self.solution
        if solution.paths:
            graph = solution.graph.copy()
        # Two passes per path: first create operations/field values, then
        # wire items and outputs to inputs.
        for path_num, path in enumerate(solution.paths):
            print("Path: {}".format(path_num))
            self._plan_assign_field_values(path, graph, canvas)
            self._plan_assign_items(path, graph, canvas)
            print()
        return canvas

    @classmethod
    def _plan_assign_field_values(cls, path, graph, canvas):
        """Assign :class:`FieldValue` to a path.

        Walks AFT nodes along *path*; on each "output" AFT an operation is
        created (or reused) and the previous node is wired in as its input.

        :param path: list of node_ids
        :param graph: BrowserGraph instance
        :param canvas: Planner instance
        :return:
        """
        prev_node = None
        for n, ndata in graph.iter_model_data(model_class="AllowableFieldType",
                                              nbunch=path):
            aft = ndata["model"]
            sample = ndata["sample"]
            if aft.field_type.role == "output":
                # create and set output operation
                if "operation" not in ndata:
                    # print("Creating field value")
                    op = canvas.create_operation_by_type_id(
                        aft.field_type.parent_id)
                    fv = op.output(aft.field_type.name)
                    canvas.set_field_value(fv, sample=sample)
                    ndata["field_value"] = fv
                    ndata["operation"] = op
                else:
                    op = ndata["operation"]
                if prev_node:
                    # The preceding AFT on the path becomes this operation's input.
                    input_aft = prev_node[1]["model"]
                    input_sample = prev_node[1]["sample"]
                    input_name = input_aft.field_type.name
                    if input_aft.field_type.array:
                        # print(
                        #     "Setting input array {} to sample='{}'".format(
                        #         input_name, input_sample
                        #     )
                        # )
                        input_fv = canvas.set_input_field_value_array(
                            op, input_name, sample=input_sample)
                    else:
                        # print(
                        #     "Setting input {} to sample='{}'".format(
                        #         input_name, input_sample
                        #     )
                        # )
                        input_fv = canvas.set_field_value(op.input(input_name),
                                                          sample=input_sample)
                    # print("Setting input field_value for '{}'".format(prev_node[0]))
                    prev_node[1]["field_value"] = input_fv
                    prev_node[1]["operation"] = op
            prev_node = (n, ndata)

    @classmethod
    def _plan_assign_items(cls, path, graph, canvas):
        """Assign :class:`Item` in a path.

        For each "input" AFT, wires the preceding output field value to it,
        or attaches the preceding inventory item (as a part or plain item).

        :param path: list of node_ids
        :param graph: BrowserGraph instance
        :param canvas: Planner instance
        :return:
        """
        prev_node = None
        for n, ndata in graph.iter_model_data(model_class="AllowableFieldType",
                                              nbunch=path):
            if (ndata["node_class"] == "AllowableFieldType"
                    and ndata["model"].field_type.role == "input"):
                if "operation" not in ndata:
                    # _plan_assign_field_values should have set this earlier.
                    cls.print_aft(graph, n)
                    raise Exception(
                        "Key 'operation' not found in node data '{}'".format(
                            n))
                input_fv = ndata["field_value"]
                input_sample = ndata["sample"]
                if prev_node:
                    node_class = prev_node[1]["node_class"]
                    if node_class == "AllowableFieldType":
                        output_fv = prev_node[1]["field_value"]
                        canvas.add_wire(output_fv, input_fv)
                    elif node_class == "Item":
                        item = prev_node[1]["model"]
                        if input_fv.field_type.part:
                            canvas.set_part(input_fv, item)
                        else:
                            canvas.set_field_value(input_fv,
                                                   sample=input_sample,
                                                   item=item)
                    else:
                        raise Exception(
                            "Node class '{}' not recognized".format(
                                node_class))
            prev_node = (n, ndata)

    ############################
    # UTILS
    ############################

    def clean_graph(self, graph):
        """Remove internal wires with different routing id but same sample.

        Also removes input->output wires that share a routing id but carry
        different samples. Mutates and returns *graph*.

        :param graph:
        :type graph:
        :return:
        :rtype:
        """
        removal = []
        afts = graph.models("AllowableFieldType")
        self.browser.retrieve(afts, "field_type")
        for n1, n2 in tqdm(list(graph.edges)):
            node1 = graph.get_node(n1)
            node2 = graph.get_node(n2)
            if (node1["node_class"] == "AllowableFieldType"
                    and node2["node_class"] == "AllowableFieldType"):
                aft1 = node1["model"]
                aft2 = node2["model"]
                ft1 = aft1.field_type
                ft2 = aft2.field_type
                if ft1.role == "input" and ft2.role == "output":
                    # Same sample but different routing -> invalid internal wire.
                    if (ft1.routing != ft2.routing
                            and node1["sample"].id == node2["sample"].id):
                        removal.append((n1, n2))
                    # Same routing but different sample -> invalid as well.
                    if (ft1.routing == ft2.routing
                            and node1["sample"].id != node2["sample"].id):
                        removal.append((n1, n2))
        print("Removing edges with same sample but different routing ids")
        print(len(graph.edges))
        graph.graph.remove_edges_from(removal)
        return graph

    def update_sample_composition(self):
        """Recursively expand the sample composition graph and store it."""
        updated_sample_composition = self.expand_sample_composition(
            browser=self.browser, graph=self.sample_composition)
        self.sample_composition = updated_sample_composition
        return self.sample_composition

    def print_sample_composition(self):
        # Debug helper: dump composition edges as "child => parent" names.
        for s1, s2 in self.sample_composition.edges:
            s1 = self.sample_composition.nodes[s1]
            s2 = self.sample_composition.nodes[s2]
            print(s1["sample"].name + " => " + s2["sample"].name)

    def root_samples(self):
        """Samples at the leaves of the composition graph (the final products)."""
        nodes = graph_utils.find_leaves(self.sample_composition)
        return [self.sample_composition.nodes[n]["sample"] for n in nodes]

    @classmethod
    def expand_sample_composition(cls, browser, samples=None, graph=None):
        """Recursively add each sample's sample-type field values as parent
        nodes/edges until no new samples are discovered.

        When called with samples=None, works on a shallow copy of *graph*
        seeded with its existing samples. Recursion terminates when a pass
        yields no new samples.
        """
        if graph is None:
            graph = nx.DiGraph()
        if samples is None:
            graph_copy = nx.DiGraph()
            graph_copy.add_nodes_from(graph.nodes(data=True))
            graph_copy.add_edges_from(graph.edges(data=True))
            graph = graph_copy
            samples = [graph.nodes[n]["sample"] for n in graph]
        if not samples:
            return graph
        browser.recursive_retrieve(samples, {"field_values": "sample"})
        new_samples = []
        for s1 in samples:
            fvdict = s1._field_value_dictionary()
            for ft in s1.sample_type.field_types:
                if ft.ftype == "sample":
                    fv = fvdict[ft.name]
                    # Normalize to a list so arrays and scalars are handled alike.
                    if not isinstance(fv, list):
                        fv = [fv]
                    for _fv in fv:
                        if _fv:
                            s2 = _fv.sample
                            if s2:
                                new_samples.append(s2)
                                # Edge points parent -> child (s2 builds s1).
                                graph.add_node(s1.id, sample=s1)
                                graph.add_node(s2.id, sample=s2)
                                graph.add_edge(s2.id, s1.id)
        return cls.expand_sample_composition(browser, new_samples, graph)

    @staticmethod
    def decompose_template_graph_into_samples(template_graph,
                                              samples,
                                              include_none=True):
        """From a template graph and list of samples, extract sample specific
        nodes from the template graph (using sample type id)

        :param template_graph:
        :type template_graph:
        :param samples:
        :type samples:
        :return: dict of sample.id -> sample-specific subgraph
        :rtype:
        """
        sample_type_ids = {s.sample_type_id for s in samples}
        sample_type_graphs = defaultdict(list)
        if include_none:
            # Include AFTs with no sample type (e.g. generic operations);
            # ``none_sample`` is a module-level placeholder — defined elsewhere.
            sample_type_ids.add(None)
            samples.append(none_sample)
            samples = list(set(samples))
        for n, ndata in template_graph.iter_model_data():
            if ndata["node_class"] == "AllowableFieldType":
                if ndata["model"].sample_type_id in sample_type_ids:
                    sample_type_graphs[ndata["model"].sample_type_id].append(n)
        sample_graphs = {}
        for sample in samples:
            nodes = sample_type_graphs[sample.sample_type_id]
            sample_graph = template_graph.subgraph(nodes)
            # Prefix makes node ids unique per sample in the composed graph.
            sample_graph.set_prefix("Sample{}_".format(sample.id))
            for n, ndata in sample_graph.iter_model_data():
                ndata["sample"] = sample
            sample_graphs[sample.id] = sample_graph
        return sample_graphs

    @staticmethod
    def _find_parts_for_samples(browser, sample_ids, lim=50):
        """Fetch up to *lim* recent part items per sample and group them.

        :return: dict of object_type_id -> sample_id -> [parts]
        """
        all_parts = []
        part_type = browser.find_by_name("__Part", model_class="ObjectType")
        for sample_id in sample_ids:
            sample_parts = browser.last(
                lim,
                model_class="Item",
                query=dict(object_type_id=part_type.id, sample_id=sample_id),
            )
            all_parts += sample_parts
        browser.retrieve(all_parts, "collections")

        # filter out parts that do not exist
        all_parts = [
            part for part in all_parts
            if part.collections and part.collections[0].location != "deleted"
        ]

        # create a Part-by-Sample-by-ObjectType dictionary
        data = {}
        for part in all_parts:
            if part.collections:
                data.setdefault(part.collections[0].object_type_id,
                                {}).setdefault(part.sample_id,
                                               []).append(part)
        return data

    def create_sample_composition_graphs(self, template_graph, browser,
                                         sample_composition):
        """Break a template_graph into subgraphs comprising of individual
        samples obtained from the sample composition graph.

        The `sample_composition` graph is a directed graph that defines how
        samples may be built from other samples. Meanwhile, the
        `template_graph` defines all possible connections between
        :class:`AllowableFieldType` and the associated weights of each edge
        determined from the :class:`AutoPlannerModel`. Using the
        `sample_composition` graph, we grab individual subgraphs from the
        template graph for each node in the sample compositions graph (using
        SampleType). The edges of the `sample_composition` graph determines
        which edges of these new subgraphs can be connected to each other,
        forming the final graph used in the Steiner tree optimization
        algorithm.

        :param template_graph:
        :param browser:
        :param sample_composition:
        :return: composed BrowserGraph with items/parts attached
        """
        sample_edges = []
        graphs = []
        samples = []
        for item in sample_composition.nodes:
            samples.append(sample_composition.nodes[item]["sample"])
        sample_graph_dict = self.decompose_template_graph_into_samples(
            template_graph, samples)

        # For every composition edge, connect matching input AFTs of the
        # child-sample graph to output AFTs of the parent-sample graph.
        for s1, s2 in sample_composition.edges:
            s1 = sample_composition.nodes[s1]["sample"]
            s2 = sample_composition.nodes[s2]["sample"]
            sample_graph1 = sample_graph_dict[s1.id]
            sample_graph2 = sample_graph_dict[s2.id]
            graphs += [sample_graph1.graph, sample_graph2.graph]
            input_afts = [
                aft for aft in sample_graph1.iter_models("AllowableFieldType")
                if aft.field_type.role == "input"
            ]
            output_afts = [
                aft for aft in sample_graph2.iter_models("AllowableFieldType")
                if aft.field_type.role == "output"
            ]
            pairs = AutoPlannerModel._match_internal_afts(
                input_afts, output_afts)
            pairs = [(sample_graph1.node_id(aft1), sample_graph2.node_id(aft2))
                     for aft1, aft2 in pairs]
            sample_edges += pairs

        # include the afts from operations that have no sample_type_id
        # (e.g. Order Primer)
        if None in sample_graph_dict:
            graphs.append(sample_graph_dict[None].graph)

        # TODO: add None edges for internal graphs...
        graph = BrowserGraph(browser)
        graph.graph = nx.compose_all(graphs)
        graph.cache_models()
        self.log.info("Adding {} sample-to-sample edges".format(
            len(sample_edges)))
        for n1, n2 in tqdm(sample_edges):
            assert n1 in graph
            assert n2 in graph
            node1 = graph.get_node(n1)
            node2 = graph.get_node(n2)
            aft1 = node1["model"]
            aft2 = node2["model"]
            # Reuse the learned weight from the template graph for this pair.
            x = (template_graph.node_id(aft1), template_graph.node_id(aft2))
            edge = template_graph.get_edge(*x)
            graph.add_edge(n1,
                           n2,
                           edge_type="sample_to_sample",
                           weight=edge["weight"])
        afts = list(graph.iter_models(model_class="AllowableFieldType"))
        browser.retrieve(afts, "field_type")
        sample_ids = list({
            ndata["sample"].id
            for _, ndata in graph.iter_model_data()
            if ndata["sample"] is not None
        })

        ##############################
        # Get items
        ##############################
        non_part_afts = [aft for aft in afts if not aft.field_type.part]
        object_type_ids = list({aft.object_type_id for aft in non_part_afts})
        self._cinfo(
            "finding all relevant items for {} samples and {} object_types".
            format(len(sample_ids), len(object_type_ids)))
        items = browser.where(
            model_class="Item",
            query={
                "sample_id": sample_ids,
                "object_type_id": object_type_ids
            },
        )
        items = [item for item in items if item.location != "deleted"]
        self.log.info("{} total items found".format(len(items)))
        items_by_object_type_id = defaultdict(list)
        for item in items:
            items_by_object_type_id[item.object_type_id].append(item)

        ##############################
        # Get parts
        ##############################
        self._cinfo("finding relevant parts/collections")
        part_by_sample_by_type = self._find_parts_for_samples(browser,
                                                              sample_ids,
                                                              lim=50)
        self._cinfo("found {} collection types".format(
            len(part_by_sample_by_type)))

        ##############################
        # Assign Items/Parts/Collections
        ##############################
        new_items = []
        new_edges = []
        for node, ndata in graph.iter_model_data(
                model_class="AllowableFieldType"):
            aft = ndata["model"]
            sample = ndata["sample"]
            if sample:
                sample_id = sample.id
                sample_type_id = sample.sample_type_id
            else:
                sample_id = None
                sample_type_id = None
            if aft.sample_type_id == sample_type_id:
                if aft.field_type.part:
                    parts = part_by_sample_by_type.get(aft.object_type_id,
                                                       {}).get(sample_id, [])
                    # Only the most recent matching part is attached.
                    for part in parts[-1:]:
                        if part.sample_id == sample_id:
                            new_items.append(part)
                            new_edges.append((part, sample, node))
                else:
                    items = items_by_object_type_id[aft.object_type_id]
                    for item in items:
                        if item.sample_id == sample_id:
                            new_items.append(item)
                            new_edges.append((item, sample, node))
        for item in new_items:
            graph.add_model(item)
        # Zero-weight edges: using an existing item costs nothing.
        for item, sample, node in new_edges:
            graph.add_edge(graph.node_id(item), node, weight=0)
        self.log.info("{} items added to various allowable_field_types".format(
            len(new_edges)))
        return graph

    @staticmethod
    def print_aft(graph, node_id):
        """Debug-print a node as an AFT or Item summary; swallows errors."""
        if node_id == "END":
            return
        try:
            node = graph.get_node(node_id)
            if node["node_class"] == "AllowableFieldType":
                aft = node["model"]
                print(
                    "<AFT id={:<10} sample={:<10} {:^10} {:<10} '{:<10}:{}'>".
                    format(
                        aft.id,
                        node["sample"].name,
                        aft.field_type.role,
                        aft.field_type.name,
                        aft.field_type.operation_type.category,
                        aft.field_type.operation_type.name,
                    ))
            elif node["node_class"] == "Item":
                item = node["model"]
                sample_name = "None"
                if item.sample:
                    sample_name = item.sample.name
                print("<Item id={:<10} {:<20} {:<20}>".format(
                    item.id, sample_name, item.object_type.name))
        except Exception as e:
            # Deliberate best-effort: printing must never break planning.
            print(node_id)
            print(e)
            pass

    def extract_leaf_operations(self, graph):
        """Extracts operations that have no inputs (such as "Order Primer")

        :param graph:
        :type graph:
        :return: node ids of output AFTs whose template node has no predecessors
        :rtype:
        """
        leaf_afts = []
        for n, ndata in graph.iter_model_data(
                model_class="AllowableFieldType"):
            aft = ndata["model"]
            if aft.field_type.role == "output":
                node_id = self.template_graph.node_id(aft)
                preds = self.template_graph.predecessors(node_id)
                if not list(preds):
                    leaf_afts.append(n)
        return leaf_afts

    def extract_items(self, graph):
        """All item node ids that feed some AFT node (deduplicated)."""
        item_groups = []
        for n, ndata in graph.iter_model_data(model_class="Item"):
            for succ in graph.graph.successors(n):
                # Group together all items feeding the same successor AFT.
                grouped = []
                for n2 in graph.graph.predecessors(succ):
                    node = graph.get_node(n2)
                    if node["node_class"] == "Item":
                        grouped.append(n2)
                item_groups.append(tuple(grouped))
        items = list(set(reduce(lambda x, y: list(x) + list(y), item_groups)))
        return items

    def extract_end_nodes(self, graph, goal_sample, goal_object_type):
        """Output AFT nodes matching the goal sample and object type."""
        end_nodes = []
        for n, ndata in graph.iter_model_data(
                model_class="AllowableFieldType"):
            aft = ndata["model"]
            if (ndata["sample"].id == goal_sample.id
                    and aft.object_type_id == goal_object_type.id
                    and aft.field_type.role == "output"):
                end_nodes.append(n)
        return end_nodes

    @staticmethod
    def get_sister_inputs(node, node_data, output_node, graph, ignore=None):
        """Returns a field_type_id to nodes."""
        # For an input AFT node, find the other ("sister") inputs feeding the
        # same output operation, keyed by field_type_id (arrays keyed per
        # sample so each array entry is assigned independently).
        sister_inputs = defaultdict(list)
        if (node_data["node_class"] == "AllowableFieldType"
                and node_data["model"].field_type.role == "input"):
            aft = node_data["model"]
            successor = output_node
            predecessors = list(graph.predecessors(successor))
            print(len(predecessors))
            for p in predecessors:
                if p == node or (ignore and p in ignore):
                    continue
                pnode = graph.get_node(p)
                if pnode["node_class"] == "AllowableFieldType":
                    is_array = pnode["model"].field_type.array is True
                    # Skip the non-array input slot already satisfied by *node*.
                    if (not is_array and
                            pnode["model"].field_type_id == aft.field_type_id):
                        continue
                    if is_array:
                        key = "{}_{}".format(pnode["model"].field_type_id,
                                             pnode["sample"].id)
                    else:
                        key = str(pnode["model"].field_type_id)
                    sister_inputs[key].append((p, pnode))
        return sister_inputs

    def _print_nodes(self, node_ids, graph):
        # Debug helper: dump items (grouped by object type) then AFT nodes.
        print(node_ids)
        items = list(graph.iter_models(nbunch=node_ids, model_class="Item"))
        self.browser.retrieve(items, "sample")
        self.browser.retrieve(items, "object_type")
        grouped_by_object_type = {}
        for item in items:
            grouped_by_object_type.setdefault(item.object_type.name,
                                              []).append(item)
        for otname, items in grouped_by_object_type.items():
            cprint(otname, "white", "black")
            for item in items:
                sample_name = "None"
                if item.sample:
                    sample_name = item.sample.name
                print(" <Item id={} {} {}".format(item.id,
                                                  item.object_type.name,
                                                  sample_name))
        for n, ndata in graph.iter_model_data(model_class="AllowableFieldType",
                                              nbunch=node_ids):
            self.print_aft(graph, n)

    def _optimize_get_seed_paths(
        self,
        start_nodes,
        end_nodes,
        bgraph,
        visited_end_nodes,
        output_node=None,
        verbose=False,
    ):
        """Candidate (cost, path) pairs from every start to every
        not-yet-visited end node, optionally forced through *output_node*.
        Unreachable pairs are silently skipped.
        """
        paths = []
        end_nodes = [e for e in end_nodes if e not in visited_end_nodes]
        if verbose:
            print("START")
            self._print_nodes(start_nodes, bgraph)
            print("END")
            print(end_nodes)
            self._print_nodes(end_nodes, bgraph)
            print("VISITED: {}".format(visited_end_nodes))
        for start in start_nodes:
            for end in end_nodes:
                through_nodes = [start, end]
                if output_node:
                    through_nodes.append(output_node)
                try:
                    cost, path = graph_utils.top_paths(through_nodes, bgraph)
                except nx.exception.NetworkXNoPath:
                    continue
                paths.append((cost, path))
        return paths

    def _gather_assignments(self, path, bgraph, visited_end_nodes,
                            visited_samples, depth):
        # NOTE(review): largely superseded by _input_to_output_graph +
        # _gather_empty_assignments, which optimize_steiner_tree calls;
        # kept as-is. Mutates *visited_samples* in place.
        input_to_output = OrderedDict()
        for n1, n2 in zip(path[:-1], path[1:]):
            node2 = bgraph.get_node(n2)
            node1 = bgraph.get_node(n1)
            if "sample" in node1:
                visited_samples.add(node1["sample"].id)
            if "sample" in node2:
                visited_samples.add(node2["sample"].id)
            if node2["node_class"] == "AllowableFieldType":
                aft2 = node2["model"]
                if aft2.field_type.role == "output":
                    input_to_output[n1] = n2
        print("PATH:")
        for p in path:
            print(p)
            self.print_aft(bgraph, p)

        # iterate through each input to find unfullfilled inputs
        inputs = list(input_to_output.keys())[:]
        print(input_to_output.keys())
        if depth > 0:
            inputs = inputs[:-1]
        # print()
        # print("INPUTS: {}".format(inputs))
        # all_sister
        empty_assignments = defaultdict(list)
        for i, n in enumerate(inputs):
            print()
            print("Finding sisters for:")
            self.print_aft(bgraph, n)
            output_n = input_to_output[n]
            ndata = bgraph.get_node(n)
            sisters = self.get_sister_inputs(n,
                                             ndata,
                                             output_n,
                                             bgraph,
                                             ignore=visited_end_nodes)
            if not sisters:
                print("no sisters found")
            for ftid, nodes in sisters.items():
                print("**Sister FieldType {}**".format(ftid))
                for s, values in nodes:
                    self.print_aft(bgraph, s)
                    empty_assignments["{}_{}".format(output_n, ftid)].append(
                        (s, output_n, values))
                print()

        ############################################
        # 4.3 recursively find cost & shortest paths
        # for unassigned inputs for every possible
        # assignment
        ############################################
        all_assignments = list(product(*empty_assignments.values()))
        print(all_assignments)
        for k, v in empty_assignments.items():
            print(k)
            print(v)
        return all_assignments

    # TODO: fix issue with seed path
    # TODO: During the seed stage, this algorithm can get 'stuck' in a
    # non-optimal solution, making it difficult to plan 'short' experimental
    # plans. As an example, planning PCRs can get stuck on 'Anneal Oligos'
    # since this is the shortest seed path. But this results in a sample
    # penalty since the Template from the sample composition is unfullfilled.
    # There is no procedure currently in place to solve this issue.
    # Solution 1: Instead of using the top seed path, evaluate the top 'N'
    # seed paths, picking the best one
    # Solution 2: Evaluate the top 'N' most 'different' seed paths
    # Solution 3: Rank seed paths not only on path length/cost, but also on
    # their visited samples.
    # The most visited samples, the better the path. However, longer paths
    # have more visited samples,
    # hence usually a higher path length/cost. It would be difficult to weight
    # these two aspects of the seed path.
    def optimize_steiner_tree(
        self,
        start_nodes,
        end_nodes,
        bgraph,
        visited_end_nodes,
        visited_samples=None,
        output_node=None,
        verbose=True,
        depth=0,
    ):
        """Recursively find a low-cost tree connecting start nodes to end
        nodes, satisfying sister inputs along the way.

        :return: (cost + sample penalty, list of paths, visited sample ids)

        NOTE: mutates *visited_end_nodes* (appends end_nodes) — callers pass
        a copy where isolation is needed.
        """
        # TODO: Algorithm gets stuck on shortest top path...
        # e.g. Yeast Glycerol Stock to Yeast Mating instead of yeast transformation
        if visited_samples is None:
            visited_samples = set()

        ############################################
        # 1. find all shortest paths
        ############################################
        seed_paths = self._optimize_get_seed_paths(start_nodes, end_nodes,
                                                   bgraph, visited_end_nodes,
                                                   output_node, verbose)
        visited_end_nodes += end_nodes

        ############################################
        # 2. find overall shortest path(s)
        ############################################
        NUM_PATHS = 1
        THRESHOLD = 10**8
        if not seed_paths:
            if verbose:
                print("No paths found")
            return math.inf, [], visited_samples
        seed_paths = sorted(seed_paths, key=lambda x: x[0])
        best = []
        for cost, path in seed_paths[:NUM_PATHS]:
            visited_samples_copy = set(visited_samples)
            final_paths = [path]
            if cost > THRESHOLD:
                # Give up early on absurdly expensive seeds.
                cprint("Path beyond threshold, returning early", "red")
                print(graph_utils.get_path_length(bgraph, path))
                return cost, final_paths, visited_samples_copy
            if verbose:
                cprint("Single path found with cost {}".format(cost), None,
                       "blue")
                cprint(graph_utils.get_path_weights(bgraph, path), None,
                       "blue")

            ############################################
            # 3. mark edges as 'visited'
            ############################################
            # Zero the weights on the chosen path so recursive calls can
            # reuse it for free.
            bgraph_copy = bgraph.copy()
            edges = list(zip(path[:-1], path[1:]))
            for e1, e2 in edges:
                edge = bgraph_copy.get_edge(e1, e2)
                edge["weight"] = 0

            ############################################
            # 4.1 input-to-output graph
            ############################################
            input_to_output = self._input_to_output_graph(
                bgraph_copy, path, visited_samples_copy)

            ############################################
            # 4.2 search for all unassigned inputs
            ############################################
            print("PATH:")
            for p in path:
                print(p)
                self.print_aft(bgraph, p)

            # iterate through each input to find unfullfilled inputs
            empty_assignments = self._gather_empty_assignments(
                bgraph, bgraph_copy, depth, input_to_output, visited_end_nodes)

            ############################################
            # 4.3 recursively find cost & shortest paths
            # for unassigned inputs for every possible
            # assignment
            ############################################
            cost, final_paths, visited_samples_copy = self._best_assignment(
                bgraph_copy,
                cost,
                depth,
                empty_assignments,
                final_paths,
                start_nodes,
                visited_end_nodes,
                visited_samples_copy,
            )

            ############################################
            # 5 Make a sample penalty for missing input samples
            ############################################
            output_samples = set()
            for path in final_paths:
                for node in path:
                    ndata = bgraph_copy.get_node(node)
                    if "sample" in ndata:
                        output_samples.add(ndata["sample"])
            expected_samples = set()
            for sample in output_samples:
                for pred in self.sample_composition.predecessors(sample.id):
                    expected_samples.add(pred)
            # 10000 penalty per expected-but-unvisited sample, floored at 0.
            sample_penalty = max([
                (len(expected_samples) - len(visited_samples_copy)) * 10000, 0
            ])
            # NOTE(review): "final_paths" is listed twice in this literal —
            # the duplicate key is harmless (same value) but should be cleaned
            # up at the next behavioral change.
            best.append({
                "cost": cost,
                "final_paths": final_paths,
                "visited_samples": visited_samples_copy,
                "expected_samples": expected_samples,
                "sample_penalty": sample_penalty,
                "final_paths": final_paths,
            })
        best = sorted(best, key=lambda x: x["cost"] + x["sample_penalty"])

        ############################################
        # return cost and paths
        ############################################
        selected = best[0]
        cprint("SAMPLES {}/{}".format(len(selected["visited_samples"]),
                                      len(selected["expected_samples"])))
        cprint("COST AT DEPTH {}: {}".format(depth, selected["cost"]), None,
               "red")
        cprint("SAMPLE PENALTY: {}".format(selected["sample_penalty"]), None,
               "red")
        cprint("VISITED SAMPLES: {}".format(selected["visited_samples"]),
               None, "red")
        return (
            selected["cost"] + selected["sample_penalty"],
            selected["final_paths"],
            selected["visited_samples"],
        )

    def _best_assignment(
        self,
        bgraph_copy,
        cost,
        depth,
        empty_assignments,
        final_paths,
        start_nodes,
        visited_end_nodes,
        visited_samples,
    ):
        """Evaluate every combination of sister-input assignments recursively
        and fold the best one (most samples visited, then lowest cost) into
        the running cost/paths/visited-samples.
        """
        all_assignments = list(product(*empty_assignments.values()))
        print(all_assignments)
        for k, v in empty_assignments.items():
            print(k)
            print(v)
        if all_assignments[0]:
            # TODO: enforce unique sample_ids if in operation_type
            cprint("Found {} assignments".format(len(all_assignments)), None,
                   "blue")
            best_assignment_costs = []
            for assign_num, assignment in enumerate(all_assignments):
                cprint(
                    "Evaluating assignment {}/{}".format(
                        assign_num + 1, len(all_assignments)),
                    None,
                    "red",
                )
                cprint("Assignment length: {}".format(len(assignment)), None,
                       "yellow")
                assignment_cost = 0
                assignment_paths = []
                assignment_samples = set(visited_samples)
                for end_node, output_node, _ in assignment:
                    # Recurse with a *copy* of visited_end_nodes so sibling
                    # assignments stay independent.
                    _cost, _paths, _visited_samples = self.optimize_steiner_tree(
                        start_nodes,
                        [end_node],
                        bgraph_copy,
                        visited_end_nodes[:],
                        assignment_samples,
                        output_node,
                        verbose=True,
                        depth=depth + 1,
                    )
                    assignment_cost += _cost
                    assignment_paths += _paths
                    assignment_samples = assignment_samples.union(
                        _visited_samples)
                best_assignment_costs.append(
                    (assignment_cost, assignment_paths, assignment_samples))
            cprint([(len(x[2]), x[0]) for x in best_assignment_costs],
                   "green")
            # Prefer assignments covering the most samples; break ties on cost.
            best_assignment_costs = sorted(best_assignment_costs,
                                           key=lambda x: (-len(x[2]), x[0]))
            cprint(
                "Best assignment cost returned: {}".format(
                    best_assignment_costs[0][0]),
                "red",
            )
            cost += best_assignment_costs[0][0]
            final_paths += best_assignment_costs[0][1]
            visited_samples = visited_samples.union(best_assignment_costs[0][2])
        return cost, final_paths, visited_samples

    def _gather_empty_assignments(self, bgraph, bgraph_copy, depth,
                                  input_to_output, visited_end_nodes):
        """Collect unfulfilled sister inputs along the chosen path.

        :return: dict of "{output_node}_{field_type_key}" -> list of
            (sister_node, output_node, sister_node_data) candidates
        """
        empty_assignments = defaultdict(list)
        inputs = list(input_to_output.keys())[:]
        print(input_to_output.keys())
        if depth > 0:
            # In recursive calls the last input is the one being assigned
            # by the caller; skip it.
            inputs = inputs[:-1]
        for i, n in enumerate(inputs):
            print()
            print("Finding sisters for:")
            self.print_aft(bgraph, n)
            output_n = input_to_output[n]
            ndata = bgraph_copy.get_node(n)
            sisters = self.get_sister_inputs(n,
                                             ndata,
                                             output_n,
                                             bgraph_copy,
                                             ignore=visited_end_nodes)
            if not sisters:
                print("no sisters found")
            for ftid, nodes in sisters.items():
                print("**Sister FieldType {}**".format(ftid))
                for s, values in nodes:
                    self.print_aft(bgraph, s)
                    empty_assignments["{}_{}".format(output_n, ftid)].append(
                        (s, output_n, values))
                print()
        return empty_assignments

    def _input_to_output_graph(self, bgraph_copy, path, visited_samples):
        """Map each input node on *path* to the output node that follows it.

        Side effect: adds every sample id seen along the path to
        *visited_samples* (mutated in place).
        """
        input_to_output = OrderedDict()
        for n1, n2 in zip(path[:-1], path[1:]):
            node2 = bgraph_copy.get_node(n2)
            node1 = bgraph_copy.get_node(n1)
            if "sample" in node1:
                visited_samples.add(node1["sample"].id)
            if "sample" in node2:
                visited_samples.add(node2["sample"].id)
            if node2["node_class"] == "AllowableFieldType":
                aft2 = node2["model"]
                if aft2.field_type.role == "output":
                    input_to_output[n1] = n2
        return input_to_output
def __init__(self, language_config, embedding_config):
    """Keep the language and embedding configuration and eagerly run the
    two loading steps: building the sil2fb code map (presumably SIL-to-FB
    language codes — name-based) and reading the embedding data.
    """
    Loggable.__init__(self)
    # Retain both configuration objects for use by the loader helpers.
    self.embedding_config = embedding_config
    self.language_config = language_config
    # Eager initialization: map first, embeddings second.
    self._get_sil2fb_map()
    self._read_embeddings()
def __init__(self, app, layer, layer_type):
    """Build the controls widget for one timeline layer.

    Constructs a 2x2 table inside an event box holding: a fold button, a
    name entry (icon chosen by track type), a solo toggle, a visibility
    check button, a (initially insensitive) lower hbox, plus a popup menu
    for moving/deleting the layer.

    :param app: application instance, stored as ``self._app``
    :param layer: the layer this widget controls, stored as ``self.layer``
    :param layer_type: GES.TrackType.AUDIO or GES.TrackType.VIDEO — used
        only to pick the name entry's primary icon
    """
    Gtk.VBox.__init__(self, spacing=0)
    Loggable.__init__(self)
    self._app = app
    self.layer = layer
    self._selected = False

    context = self.get_style_context()

    # get the default color for the current theme
    self.UNSELECTED_COLOR = context.get_background_color(Gtk.StateFlags.NORMAL)
    # use base instead of bg colors so that we get the lighter color
    # that is used for list items in TreeView.
    self.SELECTED_COLOR = context.get_background_color(Gtk.StateFlags.SELECTED)

    # 2x2 layout: fold button in column 0, upper/lower bars in column 1.
    table = Gtk.Table(rows=2, columns=2)
    table.set_border_width(2)
    table.set_row_spacings(3)
    table.set_col_spacings(3)

    # Event box wraps the table so button presses on the background are
    # caught by _buttonPressCb (e.g. to pop up the context menu).
    self.eventbox = Gtk.EventBox()
    self.eventbox.add(table)
    self.eventbox.connect("button_press_event", self._buttonPressCb)
    self.pack_start(self.eventbox, True, True, 0)

    self.sep = SpacedSeparator()
    self.pack_start(self.sep, True, True, 0)

    # Track-type -> primary icon for the name entry.
    icon_mapping = {GES.TrackType.AUDIO: "audio-x-generic",
                    GES.TrackType.VIDEO: "video-x-generic"}

    # Folding button
    # TODO use images
    fold_button = TwoStateButton("▼", "▶")
    fold_button.set_relief(Gtk.ReliefStyle.NONE)
    fold_button.set_focus_on_click(False)
    fold_button.connect("changed-state", self._foldingChangedCb)
    table.attach(fold_button, 0, 1, 0, 1)

    # Name entry — starts insensitive; enabled elsewhere (focus handlers
    # toggle editing state via _focusChangeCb).
    self.name_entry = Gtk.Entry()
    self.name_entry.set_tooltip_text(_("Set a personalized name for this layer"))
    self.name_entry.set_property("primary-icon-name", icon_mapping[layer_type])
    self.name_entry.connect("focus-in-event", self._focusChangeCb, False)
    self.name_entry.connect("focus-out-event", self._focusChangeCb, True)
    self.name_entry.connect("button_press_event", self._buttonPressCb)
    # self.name_entry.drag_dest_unset()
    self.name_entry.set_sensitive(False)

    # 'Solo' toggle button
    self.solo_button = Gtk.ToggleButton()
    self.solo_button.set_tooltip_markup(_("<b>Solo mode</b>\n"
                                          "Other non-soloed layers will be disabled as long as "
                                          "this is enabled."))
    solo_image = Gtk.Image()
    solo_image.set_from_icon_name("avatar-default-symbolic", Gtk.IconSize.MENU)
    self.solo_button.add(solo_image)
    self.solo_button.connect("toggled", self._soloToggledCb)
    self.solo_button.set_relief(Gtk.ReliefStyle.NONE)
    self.solo_button.set_sensitive(False)

    # CheckButton — layer visibility; active (visible) by default.
    visible_option = Gtk.CheckButton()
    visible_option.connect("toggled", self._visibilityChangedCb)
    visible_option.set_active(True)
    visible_option.set_sensitive(False)
    visible_option.set_tooltip_markup(_("<b>Enable or disable this layer</b>\n"
                                        "Disabled layers will not play nor render."))

    # Upper bar
    upper = Gtk.HBox()
    upper.pack_start(self.name_entry, True, True, 0)
    upper.pack_start(self.solo_button, False, False, 0)
    upper.pack_start(visible_option, False, False, 0)

    # Lower bar
    self.lower_hbox = Gtk.HBox()
    self.lower_hbox.set_sensitive(False)

    table.attach(upper, 1, 2, 0, 1)
    table.attach(self.lower_hbox, 1, 2, 1, 2)

    self.show_all()

    # Popup Menu — reorder/delete actions wired to _moveLayerCb with a
    # direction/step argument (-1 up, 1 down, -2 top, 2 bottom).
    self.popup = Gtk.Menu()
    layer_delete = Gtk.ImageMenuItem(_("_Delete layer"))
    layer_delete.connect("activate", self._deleteLayerCb)
    layer_delete.set_image(Gtk.Image.new_from_icon_name("edit-delete",
                                                        Gtk.IconSize.MENU))
    self.layer_up = Gtk.ImageMenuItem(_("Move layer up"))
    self.layer_up.connect("activate", self._moveLayerCb, -1)
    self.layer_up.set_image(Gtk.Image.new_from_icon_name("go-up",
                                                         Gtk.IconSize.MENU))
    self.layer_down = Gtk.ImageMenuItem(_("Move layer down"))
    self.layer_down.connect("activate", self._moveLayerCb, 1)
    self.layer_down.set_image(Gtk.Image.new_from_icon_name("go-down",
                                                           Gtk.IconSize.MENU))
    self.layer_first = Gtk.ImageMenuItem(_("Move layer to top"))
    self.layer_first.connect("activate", self._moveLayerCb, -2)
    self.layer_first.set_image(Gtk.Image.new_from_icon_name("go-top",
                                                            Gtk.IconSize.MENU))
    self.layer_last = Gtk.ImageMenuItem(_("Move layer to bottom"))
    self.layer_last.connect("activate", self._moveLayerCb, 2)
    self.layer_last.set_image(Gtk.Image.new_from_icon_name("go-bottom",
                                                           Gtk.IconSize.MENU))
    self.popup.append(self.layer_first)
    self.popup.append(self.layer_up)
    self.popup.append(self.layer_down)
    self.popup.append(self.layer_last)
    self.popup.append(Gtk.SeparatorMenuItem())
    self.popup.append(layer_delete)
    for menu_item in self.popup:
        menu_item.set_use_underline(True)
    self.popup.show_all()