def directConnect(factor):
    # Wire each control's translateX through a multiplyDivide "factor" node
    # into its target shape attribute, scaling by `factor`.
    # Python 2 / PyMEL code (print statements, pym.MayaNodeError).
    #
    # factor: numeric multiplier written into each factor node's input2X.
    for ctl, chan, drv, tgt, alias, val in iterConns("MEm.shapes.connsFlower"):
        oCtl = pym.PyNode(ctl)
        oShape = Shapes(tgt)
        # NOTE(review): assumes Shapes supports item access by alias,
        # returning a connectable attribute — confirm against Shapes impl.
        tgtAttr = oShape[alias]
        factorName = ctl + "_factorNode"
        # Look for an already-connected factor node downstream of translateX.
        factorNode = [
            node
            for node in oCtl.translateX.listConnections()
            if node.name() == factorName
        ]
        print "s:", factorNode
        if not factorNode:
            # Not connected yet: reuse an existing node of that name if the
            # scene has one, otherwise create a fresh multiplyDivide.
            try:
                factorNode = pym.PyNode(factorName)
            except pym.MayaNodeError:
                factorNode = pym.createNode("multiplyDivide", n=factorName)
            factorNode = pym.PyNode(factorNode)
            print "c:", factorNode
            oCtl.translateX.connect(factorNode.input1X)
        else:
            # Already connected; unwrap the single-element list.
            factorNode = factorNode[0]
            print "o:", factorNode
        # Refresh the multiplier on every pass, even for existing nodes.
        factorNode.input2X.set(factor)
        # Connect outputX to the target attribute only if not already wired.
        tgtNode = [
            attr
            for attr in factorNode.outputX.listConnections(p=True)
            if attr == tgtAttr
        ]
        print "factor2Tgt", tgtNode
        if not tgtNode:
            factorNode.outputX.connect(tgtAttr)
def __init__(self):
    """Build the main 'Chem' window: a drawing area on the left and a
    button grid on the right, with button callbacks wired up."""
    gtk.Window.__init__(self, title='Chem')

    # Horizontal container holding both halves of the UI.
    main_box = gtk.Box()
    self.add(main_box)

    # Drawing surface goes first (packed from the start edge).
    drawing_area = Shapes()
    main_box.pack_start(drawing_area, True, True, 0)

    # Button grid fills the remaining space (packed from the end edge).
    button_grid = gtk.Grid()
    main_box.pack_end(button_grid, True, True, 0)

    # Populate the grid with every button group, then hook up events.
    button_set = Buttons()
    for attach in (
        button_set._attach_carbon_buttons,
        button_set._attach_connections_buttons,
        button_set._attach_organic_functions_buttons,
        button_set._attach_search_button,
    ):
        attach(button_grid)
    button_set._connect_search_button()
    button_set._connect_connections_buttons(drawing_area.draw)
def train_dataloader(self):
    """Build the training DataLoader for the configured dataset.

    Supports 'celeba' and 'shapes'; raises ValueError for anything else.
    Also records the training set size in self.num_train_imgs.
    """
    transform = self.data_transforms()
    params = self.params
    kind = params['dataset']

    if kind == 'shapes':
        dataset = Shapes(root=params['data_path'],
                         split="train",
                         transform=transform)
    elif kind == 'celeba':
        dataset = CelebA(root=params['data_path'],
                         split="train",
                         transform=transform,
                         download=False)
    else:
        raise ValueError('Undefined dataset type')

    self.num_train_imgs = len(dataset)
    return DataLoader(dataset,
                      batch_size=params['batch_size'],
                      shuffle=True,
                      drop_last=True)
def val_dataloader(self):
    """Build the validation DataLoader for the configured dataset.

    Supports 'celeba' and 'shapes'; raises ValueError for anything else.
    Caches the loader on self.sample_dataloader and records its length
    in self.num_val_imgs, exactly as before.
    """
    transform = self.data_transforms()
    params = self.params
    kind = params['dataset']

    if kind == 'shapes':
        dataset = Shapes(root=params['data_path'],
                         split="test",
                         transform=transform)
    elif kind == 'celeba':
        dataset = CelebA(root=params['data_path'],
                         split="test",
                         transform=transform,
                         download=False)
    else:
        raise ValueError('Undefined dataset type')

    # Both branches previously built the loader identically; hoisted here.
    self.sample_dataloader = DataLoader(dataset,
                                        batch_size=144,
                                        shuffle=True,
                                        drop_last=True)
    self.num_val_imgs = len(self.sample_dataloader)
    return self.sample_dataloader
import os
import string
import pygame as pg
from shapes import Shapes
from config import config
from score import Score
from color import colors, ColorEffect

# Module-level game state shared by the UI: current score, the active
# tetromino-style shape, the preview of the next shape, and a color cycler.
score = Score()
shape = Shapes()
next_shape = Shapes()
color_effect = ColorEffect(length=15)


class Relationship():
    # NOTE(review): empty placeholder method — appears to be dead code;
    # confirm nothing calls Relationship().Button before removing.
    def Button(self):
        pass


class Button:
    def __init__(self, key, function, draw_on):
        # key: lookup key into config.buttons_size for this button's geometry.
        # function / draw_on: stored by code past this chunk — see rest of class.
        self.key = key
        # Position and size of the button.
        self.x = config.buttons_size[self.key]['x']
        self.y = config.buttons_size[self.key]['y']
        self.w = config.buttons_size[self.key]['w']
        self.h = config.buttons_size[self.key]['h']
import cv2
from drawwindow import DrawWindow
from shapes import Shapes, PolyLine, PolyPoints, PixelRegion, RectangleShape, EllipseShape
import random
#
#debug=0
##cv2.RNG random(12345)
#
#
## add some lines
# Canvas dimensions used to bound the random point coordinates below.
w=400
h=500
##PolyLine *PL
S = Shapes()
# Build 5 polylines, each with a random pen color and 10 random points,
# all kept 100px inside the canvas edges.
for i in range(5):
    PL = PolyLine()
    PL.setPenColor(random.randint(50,250),random.randint(50,250),random.randint(50,250))
    # NOTE(review): inner loop reuses `i`, shadowing the outer index —
    # harmless here, but worth renaming.
    for i in range(10):
        PL.addPoint((random.randint(100,w-100),random.randint(100,h-100)))
    S.addShape(PL)
# add a poly poregion
PP = PolyPoints()
# Geometry parameters for the poly-points region (setup continues past this chunk).
ww=100
delx=20
dely=200
def setUp(self):
    # Presumably a unittest fixture: give every test a fresh Shapes
    # instance so state cannot leak between tests.
    self.test0 = Shapes()
import numpy as np
from config import fps, pointer_size, start, end
from cursor import Cursor
from download import fetch_file
from metadata import Metadata
from shapes import Shapes

# Fetch recording metadata and announce which meeting we are rendering.
metadata = Metadata(fetch_file("metadata.xml"))
print(f'found "{metadata.meetingName}"')
print(f"starting on {metadata.starttime}")
cursor = Cursor(fetch_file("cursor.xml"))
shapes = Shapes(fetch_file("shapes.svg"))
for slide in shapes.slides:
    a = slide.file # pre-download slide images
audio = fetch_file("video/webcams.webm", show_progress=True)
print("start generating video")
# Clip the render window to [start, end]; config may leave either as None.
if start is None:
    time = 0
else:
    time = start
if end is None:
    end = metadata.duration
# NOTE(review): bare print of `end` looks like a debug leftover — confirm
# before removing.
print(end)
# Duration of one output frame in seconds; -1 means "no slide shown yet".
frame_len = 1 / fps
slide_id = -1
def __init__(self, config_provider):
    """Instantiate only the features enabled on config_provider.

    Every feature attribute defaults to None and is replaced by a real
    instance when its flag is set. Imports are deliberately deferred into
    the branches so disabled features never load their modules.
    """
    # Shared text-to-speech, created once if any speaking feature is enabled.
    text_to_speech = None
    if (config_provider.acting
            or config_provider.audio_classifier
            or config_provider.browser
            or config_provider.calculator
            or config_provider.fruit_machine
            or config_provider.hand_gesture
            or config_provider.happy_colour
            or config_provider.iris_classifier
            or config_provider.mixing_desk
            or config_provider.optical_character_recognition
            or config_provider.play_your_cards_right
            or config_provider.shapes
            or config_provider.slideshow
            or config_provider.weather):
        from texttospeech import TextToSpeech
        text_to_speech = TextToSpeech()
    # Shared speech-to-text, created once if any listening feature is enabled.
    speech_to_text = None
    if (config_provider.acting
            or config_provider.browser
            or config_provider.calculator
            or config_provider.fruit_machine
            or config_provider.iris_classifier
            or config_provider.mixing_desk
            or config_provider.phrase_translation
            or config_provider.play_your_cards_right
            or config_provider.weather):
        from speechtotext import SpeechToText
        speech_to_text = SpeechToText()
    self.acting = None
    if config_provider.acting:
        from acting import Acting
        self.acting = Acting(text_to_speech, speech_to_text)
    self.audio_classifier = None
    if config_provider.audio_classifier:
        from audioclassifier import AudioClassifier
        self.audio_classifier = AudioClassifier(text_to_speech)
    self.browser = None
    if config_provider.browser:
        from browser import Browser
        self.browser = Browser(text_to_speech, speech_to_text)
    self.calculator = None
    if config_provider.calculator:
        from calculator import Calculator
        self.calculator = Calculator(text_to_speech, speech_to_text)
    self.fruit_machine = None
    if config_provider.fruit_machine:
        from fruitmachine import FruitMachine
        self.fruit_machine = FruitMachine(text_to_speech, speech_to_text)
    self.hand_gesture = None
    if config_provider.hand_gesture:
        from handgesture import HandGesture
        self.hand_gesture = HandGesture(text_to_speech)
    self.happy_colour = None
    if config_provider.happy_colour:
        from happycolour import HappyColour
        self.happy_colour = HappyColour(text_to_speech)
    self.iris_classifier = None
    if config_provider.iris_classifier:
        from irisclassifier import IrisClassifier
        self.iris_classifier = IrisClassifier(text_to_speech, speech_to_text)
    self.mixing_desk = None
    if config_provider.mixing_desk:
        from mixingdesk import MixingDesk
        self.mixing_desk = MixingDesk(text_to_speech, speech_to_text)
    self.optical_character_recognition = None
    if config_provider.optical_character_recognition:
        from opticalcharacterrecognition import OpticalCharacterRecognition
        self.optical_character_recognition = OpticalCharacterRecognition(
            text_to_speech)
    self.phrase_translation = None
    if config_provider.phrase_translation:
        from phrasetranslation import PhraseTranslation
        self.phrase_translation = PhraseTranslation(speech_to_text)
    self.play_your_cards_right = None
    if config_provider.play_your_cards_right:
        from playyourcardsright import PlayYourCardsRight
        self.play_your_cards_right = PlayYourCardsRight(
            text_to_speech, speech_to_text)
    self.shapes = None
    if config_provider.shapes:
        from shapes import Shapes
        self.shapes = Shapes(text_to_speech)
    self.slideshow = None
    if config_provider.slideshow:
        from slideshow import Slideshow
        self.slideshow = Slideshow(text_to_speech)
    # Television takes no speech services.
    self.television = None
    if config_provider.television:
        from television import Television
        self.television = Television()
    self.weather = None
    if config_provider.weather:
        from weather import Weather
        self.weather = Weather(text_to_speech, speech_to_text)
# Training configuration for a segmentation model on the synthetic Shapes data.
num_samples = 1000
iou_thresh = 0.3
max_num_shapes = 3
metrics = ['mean_squared_error']
# loss = JaccardLoss()
# Combined loss: all three are applied (see how loss is consumed downstream).
loss = [DiceLoss(), JaccardLoss(), FocalLoss()]
# input_shape is defined outside this chunk; only H and W are used here.
H, W = image_shape = input_shape[:2]
batch_size = 5
epochs = 10
freeze = True
stop_patience = 5
reduce_patience = 2
experiment_path = 'experiments/'

# Synthetic dataset generator and its derived class count / samples.
data_manager = Shapes(num_samples, image_shape, iou_thresh=iou_thresh,
                      max_num_shapes=max_num_shapes)
num_classes = data_manager.num_classes
data = data_manager.load_data()
processor = PreprocessSegmentation(image_shape, num_classes)

# setting additional callbacks
callbacks = []
log_filename = os.path.join(experiment_path, 'optimization.log')
log = CSVLogger(log_filename)
stop = EarlyStopping('loss', patience=stop_patience)
save_filename = os.path.join(experiment_path, 'model.hdf5')
save = ModelCheckpoint(save_filename, 'loss', save_best_only=True)
plateau = ReduceLROnPlateau('loss', patience=reduce_patience)
callbacks.extend([log, stop, save, plateau])
.expects("call6").with_args(arg1, arg2, kw1=kw1, kw2=kw2) ) (Chain(proxy) .call1(arg1) .call2() .call3(kw1=kw1) .call4(arg2) .call5() .call6(arg1, arg2, kw1=kw1, kw2=kw2) ) describe "Managing the proxy": it "is possible to add new proxies and go back to old ones": shapes = ( Chain(Shapes()) .create('square') .chain_promote_value() .set_length(4) .chain_demote_value() .create('rectangle') .chain_promote_value() .set_width(6) .set_length(8) .chain_demote_value() .chain_exit() )