def __init__(self, pi_creature, *args, **kwargs):
    """Blink animation: morph pi_creature into a blinked copy and back."""
    # Build the blink target on an independent copy so the source is untouched.
    target = deepcopy(pi_creature).blink()
    squished = squish_alpha_func(there_and_back)
    Transform.__init__(self, pi_creature, target,
                       alpha_func=squished, *args, **kwargs)
def __init__(self, parent_transform=None, rename=False, recursive=False):
    """Base transform state plus renaming/recursion flags and an optional
    parent transform to chain through."""
    Transform.__init__(self)
    self.parent_transform = parent_transform
    self.recursive = recursive
    self.rename = rename
def transform_expr(self, expr):
    """CSE hook: reuse a previously computed non-simple expression when one
    is cached, otherwise fall through to the base transform."""
    if not self.is_simple(expr):
        cached = self.available_expressions.get(expr)
        if cached is not None:
            return cached
    return Transform.transform_expr(self, expr)
def post_apply(self, fn):
    """Prune dead variables from fn's type environment, then run the base
    post-apply hook. Returns fn for chaining."""
    fn.type_env = dict((name, t)
                       for (name, t) in fn.type_env.iteritems()
                       if self.is_live(name))
    Transform.post_apply(self, fn)
    return fn
def transform(args, x_queue, datadir, fname_index, joint_index, o_queue):
    """Worker loop: pull CSV rows from x_queue until a None sentinel arrives,
    transform each sample, and push (CHW image, target) pairs onto o_queue."""
    trans = Transform(args)
    while True:
        item = x_queue.get()
        if item is None:
            break
        img, target = trans.transform(item.split(','), datadir,
                                      fname_index, joint_index)
        # HWC -> CHW for the downstream consumer.
        o_queue.put((img.transpose((2, 0, 1)), target))
class Entity:
    """A scene entity with identity/category metadata and a spatial transform.

    Persisted to/from a db3 database and to XML-style attribute dicts.
    """

    def __init__(self):
        self._type = ''
        self._id = ''
        self._category = ''
        self._level = ''
        # Fresh GUID by default; loadDb3() replaces it with the stored one.
        self.guid = guid()
        self.transform = Transform()

    def loadDb3(self, db3File, guid):
        """Populate this entity from db3File for the given guid."""
        self.guid = guid
        self._category = db3File.queryEntityData(guid, '_Category')
        self._level = db3File.queryEntityData(guid, '_Level')
        # The stored matrix is a comma-separated string of floats.
        self.transform.matrix.set([float(i) for i in db3File.queryEntityData(guid, 'Transform').split(',')])
        self.transform.setMatrix()

    def saveDb3(self, db3File):
        """Write non-empty fields back to db3File (blanks are skipped)."""
        if self._id:
            # The level is embedded in the display id, e.g. "door (level1)".
            db3File.updateEntityData(self.guid, '_ID', self._id + ' (' + self._level + ')')
        if self._category:
            db3File.updateEntityData(self.guid, '_Category', self._category)
        if self._level:
            db3File.updateEntityData(self.guid, '_Level', self._level)
        if self.transform:
            db3File.updateEntityData(self.guid, 'Transform', ', '.join([str(i) for i in self.transform.matrix.get()]))

    def setAttributes(self, attrs):
        """Read name/pos/rot/scale from an attribute dict (space-separated floats)."""
        if 'name' in attrs:
            self._id = attrs['name']
        if 'pos' in attrs:
            self.transform.position = [float(i) for i in attrs['pos'].split()]
        if 'rot' in attrs:
            self.transform.rotation = [float(i) for i in attrs['rot'].split()]
        if 'scale' in attrs:
            self.transform.scale = [float(i) for i in attrs['scale'].split()]
        # Rebuild the matrix from the freshly assigned components.
        self.transform.getMatrix()

    def getAttributes(self):
        """Inverse of setAttributes(): only populated fields appear in the dict."""
        attrs = {}
        if self._id:
            attrs['name'] = self._id
        if len(self.transform.position):
            attrs['pos'] = ' '.join([str(i) for i in self.transform.position])
        if len(self.transform.rotation):
            attrs['rot'] = ' '.join([str(i) for i in self.transform.rotation])
        if len(self.transform.scale):
            attrs['scale'] = ' '.join([str(i) for i in self.transform.scale])
        return attrs

#-------------------------------------------------------------------------------
# Eof
#-------------------------------------------------------------------------------
def __init__(self):
    """Fresh adverb/tiling bookkeeping state on top of the base Transform."""
    Transform.__init__(self)
    self.num_tiles = 0
    self.adverbs_visited = []
    self.adverb_args = []
    self.exp_stack = []
    self.type_env_stack = []
    self.dl_tile_estimates = []
    self.ml_tile_estimates = []
    self.expansions = {}
    # For now, we'll assume that no closure variables have the same name.
    self.closure_vars = {}
def __init__(self, pi_creature, *args, **kwargs):
    """Arm-wag animation: wag toward whichever side the arm sits on, then back."""
    final = deepcopy(pi_creature)
    # Wag to the LEFT if the arm sits on the creature's left side, else RIGHT.
    offset = pi_creature.arm.get_center() - pi_creature.get_center()
    wag_direction = LEFT if offset[0] < 0 else RIGHT
    final.arm.wag(0.7 * UP, wag_direction, 2.0)
    final.rewire_part_attributes(self_from_parts=True)
    Transform.__init__(self, pi_creature, final,
                       alpha_func=there_and_back, *args, **kwargs)
def __init__(self):
    """Blank entity: empty metadata fields, a fresh guid, identity transform."""
    self.transform = Transform()
    self.guid = guid()
    self._type = ''
    self._id = ''
    self._category = ''
    self._level = ''
def transform_expr(self, expr):
    """Transform expr, first lifting raw Python values into syntax nodes.

    The transformed result must carry an inferred type; untyped results mean
    the expression is unsupported by type inference.
    """
    if not isinstance(expr, syntax.Expr):
        expr = ast_conversion.value_to_syntax(expr)
    typed = Transform.transform_expr(self, expr)
    assert typed.type is not None, \
        "Unsupported expression encountered during type inference: %s" % (expr,)
    return typed
def __init__(self, nesting_idx=-1, fixed_idx=-1, tile_sizes_param=None,
             fixed_tile_sizes=None, preallocate_output=False):
    """Tiling transform state; tiling starts disabled with no output bound."""
    Transform.__init__(self)
    self.nesting_idx = nesting_idx
    self.fixed_idx = fixed_idx
    self.tile_sizes_param = tile_sizes_param
    self.fixed_tile_sizes = fixed_tile_sizes
    self.preallocate_output = preallocate_output
    self.tiling = False
    self.output_var = None
    # For now, we'll assume that no closure variables have the same name.
    self.closure_vars = {}
def deserialize(cls, obj):
    """Rebuild an instance from its serialized dict form, deserializing each
    stored transformation along the way."""
    transforms = [Transform.deserialize(t) for t in obj['transformations']]
    return cls(obj['width'], obj['height'], obj['seed'],
               obj['points'], obj['iterations'], transforms)
def transform_block(self, stmts, keep_bindings=False):
    """Transform stmts inside fresh CSE and bindings scopes.

    The bindings scope is optionally left open (keep_bindings=True) so the
    caller can inspect it; the CSE scope is always closed.
    """
    self.available_expressions.push()
    self.bindings.push()
    result = Transform.transform_block(self, stmts)
    self.available_expressions.pop()
    if not keep_bindings:
        self.bindings.pop()
    return result
def tuple_proj(self, tup, idx, explicit_struct=False):
    """Project element idx out of tup, short-circuiting through known bindings."""
    if tup.__class__ is not Var or tup.name not in self.bindings:
        return Transform.tuple_proj(self, tup, idx,
                                    explicit_struct=explicit_struct)
    bound = self.bindings[tup.name]
    # A syntactic Tuple keeps elements in .elts; anything else holds .args.
    if bound.__class__ is Tuple:
        return bound.elts[idx]
    return bound.args[idx]
def __init__(self, start_anim, end_anim, **kwargs):
    """Animation that morphs one running animation into another.

    Forces both animations onto a common run time and aligns point counts
    between their mobjects so the Transform interpolation is well-defined.
    """
    digest_config(self, kwargs, locals())
    if "run_time" in kwargs:
        self.run_time = kwargs.pop("run_time")
    else:
        # Default to the slower of the two so neither gets truncated.
        self.run_time = max(start_anim.run_time, end_anim.run_time)
    for anim in start_anim, end_anim:
        anim.set_run_time(self.run_time)
    if start_anim.starting_mobject.get_num_points() != end_anim.starting_mobject.get_num_points():
        Mobject.align_data(start_anim.starting_mobject, end_anim.starting_mobject)
        # NOTE(review): nesting reconstructed from flattened source — the
        # per-animation re-alignment appears to run only when counts differed.
        for anim in start_anim, end_anim:
            if hasattr(anim, "ending_mobject"):
                Mobject.align_data(anim.starting_mobject, anim.ending_mobject)
    Transform.__init__(self, start_anim.mobject, end_anim.mobject, **kwargs)
    #Rewire starting and ending mobjects
    start_anim.mobject = self.starting_mobject
    end_anim.mobject = self.ending_mobject
def attr(self, obj, field):
    """Read obj.field, folding through known bindings when obj is a bound Var."""
    if obj.__class__ is Var and obj.name in self.bindings:
        bound = self.bindings[obj.name]
        kind = bound.__class__
        if kind is Struct:
            # Struct literal: fetch the field positionally from its args.
            return bound.args[bound.type.field_pos(field)]
        if kind is Slice or kind is ArrayView:
            return getattr(bound, field)
    return Transform.attr(self, obj, field)
def pwmControlThread():
    """Bridge gamepad packets from serialRealTimeQueue to the PWM hat.

    Parses colon-separated integer packets, drives the two drive motors
    through a Transform, drives the intake motors from the triggers, and
    idles all outputs if no packet arrives within WATCHDOG_DELAY seconds.
    """
    # setup arduino serial comm
    pwm.setPWMFreq(50)
    t = Transform(True, False) #invert one motor and not the other when constructing
    watchdog = time.time()
    # thread main loop
    while True:
        time.sleep(.005)
        # check for data that needs to be bridged to arduino
        dataFlag = True
        if not serialRealTimeQueue.empty():
            data = serialRealTimeQueue.get()
        else:
            dataFlag = False
        # if data was recieved parse and update pwm hat
        if dataFlag:
            # Strip framing: 5-byte header and 1-byte trailer — TODO confirm framing.
            data = data[5:-1]
            print data
            data_nums = [int(x) for x in data.split(':') if x.strip()]
            print " ", data_nums[0], " ", data_nums[1]
            # Fields 0-1 are the drive inputs; Transform maps them to pulse widths.
            leftMtr,rightMtr = t.transform(data_nums[0],data_nums[1])
            print " ", leftMtr , " ", rightMtr
            setServoPulse(LEFT_MOT, leftMtr)
            setServoPulse(RIGHT_MOT, rightMtr)
            # Fields 5-7 retune the motor pulse range (values arrive in tens).
            # NOTE(review): assumes every packet carries >= 8 fields — confirm.
            Transform.MOTOR_MIN = data_nums[5]*10
            Transform.MOTOR_IDLE = data_nums[6]*10
            Transform.MOTOR_MAX = data_nums[7]*10
            leftIntake = Transform.MOTOR_IDLE
            rightIntake = Transform.MOTOR_IDLE
            # Triggers rest near 127; require a 10-count deadband before acting.
            if data_nums[2] > 127 + 10:
                # if left trigger pressed (i think) spin motors in opposite direction
                leftIntake = Transform.map_range(data_nums[2],127,255,Transform.MOTOR_IDLE,Transform.MOTOR_MAX)
                rightIntake = Transform.map_range(data_nums[2],127,255,Transform.MOTOR_IDLE,Transform.MOTOR_MIN)
            elif data_nums[3] > 127 + 10:
                # if right trigger pressed
                leftIntake = Transform.map_range(data_nums[3],127,255,Transform.MOTOR_IDLE,Transform.MOTOR_MIN)
                rightIntake = Transform.map_range(data_nums[3],127,255,Transform.MOTOR_IDLE,Transform.MOTOR_MAX)
            else :
                leftIntake = Transform.MOTOR_IDLE
                rightIntake = Transform.MOTOR_IDLE
            print " " , leftIntake, " ", rightIntake
            setServoPulse(LEFT_MANIP,leftIntake)
            setServoPulse(RIGHT_MANIP,rightIntake)
            # Every valid packet feeds the watchdog.
            watchdog = time.time()
        if watchdog + WATCHDOG_DELAY < time.time():
            # No packets recently: fail safe by idling all outputs.
            setServoPulse(LEFT_MOT, Transform.MOTOR_IDLE)
            setServoPulse(RIGHT_MOT, Transform.MOTOR_IDLE)
            setServoPulse(LEFT_MANIP, Transform.MOTOR_IDLE)
            setServoPulse(RIGHT_MANIP, Transform.MOTOR_IDLE)
            watchdog = time.time()
            print "you need to feed the dogs"
def transform_Assign(self, stmt):
    """
    If you encounter an adverb being written to an output location,
    then why not just use that as the output directly?

    Returns None when the adverb consumed the assignment (the statement
    is elided); otherwise defers to the generic Transform handling.
    """
    if stmt.lhs.__class__ is Index:
        rhs_class = stmt.rhs.__class__
        if rhs_class is Map:
            self.transform_Map(stmt.rhs, output=stmt.lhs)
            return None
        elif rhs_class is OuterMap:
            self.transform_OuterMap(stmt.rhs, output=stmt.lhs)
            # Bug fix: this branch previously fell through to the generic
            # handler, re-transforming an assignment whose OuterMap had
            # already written to the output (inconsistent with the Map case).
            return None
    return Transform.transform_Assign(self, stmt)
class ClientCommander(Character):
    """Client-side commander view: renders the whole map each frame and lets
    the player toggle waypoints on the server commander with mouse clicks."""

    def __init__(self, screen, server_commander):
        # NOTE(review): super(Character, ...) skips Character.__init__ and
        # starts MRO lookup above it — confirm this is intentional.
        super(Character, self).__init__()
        self.screen = screen
        # Viewport starts as a 100x100 game-coordinate square; draw()
        # re-fits it to the actual map every frame.
        self.t = Transform(screen, (0, 100, 0, 100))
        self.server_commander = server_commander

    def key_control(self, key, event):
        Character.key_control(self, key, event)
        # Custom comamnder control

    def mouse_control(self, mouse_pos):
        pass

    def click_control(self, mouse_pos, button):
        """Toggle a waypoint: remove the first one within 0.25 game units of
        the click, otherwise add a new waypoint at the click position."""
        removed = False
        wp = self.t.inv_transform_coord(mouse_pos)
        for waypoint in self.server_commander.waypoints:
            if dist(waypoint[0], waypoint[1], wp[0], wp[1]) < 0.25:
                self.server_commander.rmwp(waypoint[0], waypoint[1])
                removed = True
                break
        if not removed:
            self.server_commander.addwp(wp[0], wp[1])

    def update(self, game_map):
        pass

    def draw(self, game_map):
        """Redraw the full scene with the viewport fitted to the map."""
        self.t.update_viewport((0, game_map.rows, 0, game_map.cols))
        draw_bg(self.screen, self.t)
        draw_walls(self.screen, self.t, game_map)
        draw_hero(self.screen, self.t, game_map)
        draw_units(self.screen, self.t, game_map)
        draw_bullets(self.screen, self.t, game_map)
        draw_waypoints(self.screen, self.t, game_map)
        draw_letterbox(self.screen, self.t)
def __init__(self, screen, server_hero):
    """
    screen: PyGame Surface object.
    t: Transform object to convert game coords to screen coords.
    server_hero: Temp reference to server hero object...
    """
    super(Character, self).__init__()
    self.screen = screen
    # The hero sees a square viewport spanning twice the view radius.
    view_span = ClientHero.VIEW_SQ_RADIUS * 2
    self.t = Transform(screen, (0, view_span, 0, view_span))
    self.server_hero = server_hero
    self.vel = (0, 0)
    self.firing = False
    self.fired = False
    self.font = pygame.font.SysFont("monospace", 24)
def __init__(self, fname='/dev/ttyACM0', brate=1000000, dim=(12,10), gamma=2.2):
    """ Initialise a LedScreen object.
    >>> screen = LedScreen()
    """
    if type(dim) not in (tuple, list) or len(dim) != 2:
        raise ValueError("Invalid dimension. Format is tuple(x,y)")
    # Base class stores dimension/gamma — presumably it also sets self.w and
    # self.h, which are read below; TODO confirm.
    abstractled.AbstractLed.__init__(self, dimension=dim, gamma=gamma)
    self.tty = uspp.SerialPort(fname, timeout=0)
    #self.tty = uspp.SerialPort(fname, speed=brate, timeout=0)
    # Baud rate is configured out-of-band via stty; uspp's speed arg unused.
    os.environ['LEDWALL_TTY'] = fname
    os.system("stty -F $LEDWALL_TTY " + str(brate))
    # Maps logical (x, y) coordinates to the physical LED ordering.
    self.transform = Transform(*dim)
    # Physical-order output buffer, one RGB triple per LED.
    self.b = [(0,0,0)] * self.w * self.h
def __init__(self,args):
    """Wire up the collector: console logging, a raw capture file, and one
    instance of each pipeline stage, then defer to the base class."""
    # create logger
    self.logger = logging.getLogger(__name__)
    self.logger.setLevel(logging.DEBUG)
    # create console handler and set level to debug
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    # create formatter
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    # add formatter to ch
    ch.setFormatter(formatter)
    # add ch to logger
    self.logger.addHandler(ch)
    self.logger.debug( "Starting Collector process in %s"%os.getcwd())
    #self.logger.debug( "Gevent Version %s"%gevent.__version__)
    #TODO: move output file name to config
    fname = "./NetFlow.%d.bin"%int(time.time())
    #WARN: might want to remove this after testing
    self.out = open(fname,"wb")
    #create tool instances
    self.interface = Interface()
    self.parse = Parse()
    self.context = Context()
    self.describe = Describe()
    self.standardize = Standardize()
    self.transform = Transform()
    self.partition = Partition()
    self.q = Queue()
    self.inWindow = settings.SETTINGS.get("collector","inWindow")
    self.score = Score()
    #TODO: move csv name to config
    #self.csv = CSV("output.csv")
    self.output = Output()
    # NOTE(review): returning a value from __init__ is only legal if the
    # base __init__ returns None — confirm Collector's base class.
    return super(Collector,self).__init__(args)
def fromJSON(json):
    """Build a Tile from its JSON dict, then recompute the bounding box from
    the parsed transforms."""
    tile = Tile()
    tile._bbox = json['bbox']
    tile._height = int(json['height'])
    tile._width = int(json['width'])
    tile._layer = int(json['layer'])
    tile._minIntensity = json['minIntensity']
    tile._maxIntensity = json['maxIntensity']
    tile._mipmapLevels = json['mipmapLevels']
    tile._transforms = Transform.fromJSON(json['transforms'])
    # re-calculate bounding box for transformed data
    bb = Tile.calculateBB(tile)
    real_w, real_h = bb[0], bb[1]
    tile._real_width = real_w
    tile._real_height = real_h
    tile._bbox = [0, real_w, 0, real_h]
    return tile
def __init__(self, fname='/dev/ttyACM0', brate=1000000, dim=(12,10), gamma=2.2):
    """ Initialise a LedScreen object.
    >>> screen = LedScreen()
    """
    if type(dim) not in (tuple, list) or len(dim) != 2:
        raise ValueError("Invalid dimension. Format is tuple(x,y)")
    self.tty = uspp.SerialPort(fname, timeout=0)
    #self.tty = uspp.SerialPort(fname, speed=brate, timeout=0)
    # Baud rate is configured out-of-band via stty; uspp's speed arg unused.
    os.environ['LEDWALL_TTY'] = fname
    os.system("stty -F $LEDWALL_TTY " + str(brate))
    self.w, self.h = dim
    # Logical framebuffer: one RGB triple per LED, row-major.
    self.buf = [(0, 0, 0)] * self.w * self.h
    # Maps logical (x, y) coordinates to the physical LED ordering.
    self.transform = Transform(*dim)
    gamma = float(gamma)
    max_gamma = 255.**gamma
    # Rounded gamma lookup table for each 8-bit channel value.
    self.gamma_map = [ int( (1 + 2 * x**gamma / (max_gamma/255.)) //2 ) for x in xrange(256) ]
    # 254 appears to be reserved as the end-of-frame byte on the wire (the
    # push() paths elsewhere append chr(254)); clamp data bytes to 253.
    for i, v in enumerate(self.gamma_map):
        if v == 254:
            self.gamma_map[i] = 253
class LedScreen(abstractled.AbstractLed):
    """ The low-level LED wall screen. """

    def __init__(self, fname='/dev/ttyACM0', brate=1000000, dim=(12,10), gamma=2.2):
        """ Initialise a LedScreen object.
        >>> screen = LedScreen()
        """
        if type(dim) not in (tuple, list) or len(dim) != 2:
            raise ValueError("Invalid dimension. Format is tuple(x,y)")
        abstractled.AbstractLed.__init__(self, dimension=dim, gamma=gamma)
        self.tty = uspp.SerialPort(fname, timeout=0)
        #self.tty = uspp.SerialPort(fname, speed=brate, timeout=0)
        # Baud rate is configured out-of-band via stty; uspp's speed arg unused.
        os.environ['LEDWALL_TTY'] = fname
        os.system("stty -F $LEDWALL_TTY " + str(brate))
        # Maps logical (x, y) coordinates to the physical LED ordering.
        self.transform = Transform(*dim)
        # Physical-order output buffer, one RGB triple per LED.
        self.b = [(0,0,0)] * self.w * self.h

    def __setitem__(self, tup, val):
        # Store via the base class, then drain any bytes the device echoed
        # back so its input buffer never fills up.
        abstractled.AbstractLed.__setitem__(self, tup, val)
        waiting = self.tty.inWaiting()
        if waiting > 0:
            _ = self.tty.read(waiting)

    def push(self):
        # Reorder the logical framebuffer (self.buf) into physical LED order
        # (self.b), then stream it out as GRB triples with a 254 terminator.
        for x in xrange(self.w):
            for y in xrange(self.h):
                i = x + y * self.w
                self.b[self.transform.inverse( (x, y) )] = self.buf[i]
        self.tty.write( ''.join(chr(g)+chr(r)+chr(b) for r,g,b in self.b) + chr(254) )
import time from skimage.draw import line from tqdm import tqdm_notebook import cv2 import sensors from occupancy_map import Map from transform import Transform from particle_filter import ParticleFilter # %% import sensor lidar = sensors.Lidar('data/sensor_data/lidar.csv', downsample_rate=1) gyro = sensors.Gyroscope('data/sensor_data/fog.csv', downsample_rate=1) encoder = sensors.Encoder('data/sensor_data/encoder.csv', downsample_rate=1) # %% initialization myMap = Map(res=1, x_range=[-1300, 1300], y_range=[-1200, 1200]) tf = Transform() # %% Main Loop pf = ParticleFilter(n_particles=1, add_noise=False) gyro_range = int(gyro.get_length() * 1) now = time.time() car_trajactory = np.zeros([2, gyro_range]) # initialize index encoder_idx = 0 update_count = 0 max_idx = min(encoder.get_length() - 1, lidar.get_length() - 1) update_now = False for gyro_idx in tqdm_notebook(range(gyro_range)): t_loop = time.time()
args = get_arguments() print(args) flic_swap_joints = [(2, 4), (1, 5), (0, 6)] lsp_swap_joints = [(8, 9), (7, 10), (6, 11), (2, 3), (1, 4), (0, 5)] mpii_swap_joints = [(12, 13), (11, 14), (10, 15), (2, 3), (1, 4), (0, 5)] for datadir, n_joints, draw_joints, swap_joints, min_dim in [ ('data/FLIC-full', 7, flic_draw_joints, flic_swap_joints, 100), ('data/lspet_dataset', 14, lsp_draw_joints, lsp_swap_joints, 50), ('data/mpii', 16, mpii_draw_joints, mpii_swap_joints, 100)]: args.datadir = datadir args.joint_num = n_joints args.min_dim = min_dim # augmentation setting trans = Transform(args, swap_joints) # test data test_fn = '%s/train_joints.csv' % args.datadir test_dl = np.array([l.strip() for l in open(test_fn).readlines()]) result_dir = '%s/test_trans' % args.datadir if not os.path.exists(result_dir): os.makedirs(result_dir) for i, line in enumerate(test_dl): orig, input_data, label = load_data(trans, args, line) input_data = input_data.transpose( (0, 2, 3, 1))[0].astype(np.float32) label = label.astype(np.float32).flatten() cv.imwrite('%s/%d_orig.jpg' % (result_dir, i), orig) img, label = trans.revert(input_data, label)
def reflectZ3D(self):
    """Reflect every stored point through the Z plane, in place.

    Returns the mutated points list for chaining.
    """
    for idx, point in enumerate(self.points):
        self.points[idx] = Transform().reflectZ3D(point)
    return self.points
type=int, default=5, help='slide an image when cropping') parser.add_argument( '--lcn', type=bool, default=True, help='local contrast normalization for data augmentation') parser.add_argument('--joint_num', type=int, default=7) args = parser.parse_args() print(args) # augmentation setting trans = Transform(padding=[args.crop_pad_inf, args.crop_pad_sup], flip=args.flip, size=args.size, shift=args.shift, lcn=args.lcn) # test data test_fn = '%s/test_joints.csv' % args.data_dir test_dl = np.array([l.strip() for l in open(test_fn).readlines()]) result_dir = '%s/test_trans' % args.data_dir if not os.path.exists(result_dir): os.makedirs(result_dir) for i, line in enumerate(test_dl): orig, input_data, label = load_data(trans, args, line) input_data = input_data.transpose((0, 2, 3, 1))[0].astype(np.float32) label = label.astype(np.float32).flatten() cv.imwrite('%s/%d_orig.jpg' % (result_dir, i), orig)
def projectionYX3D(self):
    """Project every stored point onto the YX plane, in place.

    Returns the mutated points list for chaining.
    """
    for idx, point in enumerate(self.points):
        self.points[idx] = Transform().projectionYX3D(point)
    return self.points
def scaleX3D(self, x):
    """Scale every stored point along X by factor x, in place.

    Returns the mutated points list for chaining.
    """
    for idx, point in enumerate(self.points):
        self.points[idx] = Transform().scaleX3D(point, x)
    return self.points
def var_transform( ):
    # for transform whole df passed, not the one with summary
    # NOTE(review): relies on module-level globals idx, df, par_fpath and
    # outpath being defined before this is called — confirm they exist.
    Tf = Transform(idx, df, par_fpath)
    Tf.main_method(outpath)
def create_data_dictionary(self, row):
    """Converts mysql data into dictionary of neo4j properties."""
    properties = OrderedDict()
    today_date = datetime.now().strftime("%Y-%m-%d")
    # Rename/reshape the mysql row into neo4j-keyed fields.
    data = Transform.map_data(self.neo4j_mapping, row)
    for key, value in self.neo4j_properties.iteritems():
        try:
            if key == 'PCreatedDate':
                properties.update({'PCreatedDate': today_date})
            elif key == 'UpdatedDate':
                properties.update({'UpdatedDate': today_date})
            elif key == 'Gender':
                # Source encodes gender numerically: 1 = male, 2 = female;
                # anything else maps to an empty string.
                flag = data.get('Gender')
                if flag == 2:
                    properties.update({'Gender': 'Female'})
                elif flag == 1:
                    properties.update({'Gender': 'Male'})
                else:
                    properties.update({'Gender': ''})
            elif key == 'BornOnMonth':
                BornOn = data.get('BornOn')
                if BornOn:
                    month = str(BornOn.month)
                    properties.update({'BornOnMonth': month})
            elif key == 'BornOnYear':
                BornOn = data.get('BornOn')
                if BornOn:
                    year = str(BornOn.year)
                    properties.update({'BornOnYear': year})
            elif key == 'DiedOnYear':
                DiedOn = data.get('DiedOn')
                if DiedOn:
                    year = str(DiedOn.year)
                    properties.update({'DiedOnYear': year})
            elif key == 'DiedOnMonth':
                DiedOn = data.get('DiedOn')
                if DiedOn:
                    month = str(DiedOn.month)
                    properties.update({'DiedOnMonth': month})
            elif key == 'CBUrl':
                # Stored path is relative; publish an absolute crunchbase URL.
                relative_path = data.get('CBUrl')
                if relative_path:
                    relative_path = Transform.to_str(relative_path)
                    absolute_path = 'https://www.crunchbase.com/{}'.format(
                        relative_path)
                    properties.update({'CBUrl': absolute_path})
            else:
                val = data.get(key)
                properties.update({key: val})
        except Exception as e:
            # Best-effort conversion: a bad field is logged and skipped.
            self.logg(debug_msg='Error while perparing data dictionary.',
                      info_msg='Function = create_data_dictionary()',
                      warning_msg=
                      'Data transformation to key, value pair failed.',
                      error_msg='Module = ' + log_file,
                      critical_msg=str(e))
            continue
    return properties
def test_gps_dms_to_dd(self):
    """DMS-to-decimal-degree conversion matches known fixtures."""
    cases = [
        ((46.0, 3.0, 5.0436), 46.051401),
        ((14.0, 30.0, 22.8132), 14.506337),
    ]
    for dms, expected in cases:
        self.assertEqual(Transform.gps_dms_to_dd(dms), expected)
def __init__(self):
    """Start with an identity transform and no attached components."""
    self.components = []
    self.transform = Transform()
class LedScreen(object):
    """ The low-level LED wall screen.

    Keeps a logical framebuffer (self.buf, row-major RGB triples), applies
    gamma correction on write, and pushes frames over a serial port as GRB
    triples terminated by byte 254.
    """

    def __init__(self, fname='/dev/ttyACM0', brate=1000000, dim=(12, 10),
                 gamma=2.2):
        """ Initialise a LedScreen object.
        >>> screen = LedScreen()
        """
        if type(dim) not in (tuple, list) or len(dim) != 2:
            raise ValueError("Invalid dimension. Format is tuple(x,y)")
        self.tty = uspp.SerialPort(fname, timeout=0)
        #self.tty = uspp.SerialPort(fname, speed=brate, timeout=0)
        # Baud rate is configured out-of-band via stty; uspp's speed unused.
        os.environ['LEDWALL_TTY'] = fname
        os.system("stty -F $LEDWALL_TTY " + str(brate))
        self.w, self.h = dim
        self.buf = [(0, 0, 0)] * self.w * self.h
        # Maps logical (x, y) coordinates to the physical LED index.
        self.transform = Transform(*dim)
        gamma = float(gamma)
        max_gamma = 255.**gamma
        # Rounded gamma lookup table for each 8-bit channel value.
        self.gamma_map = [
            int((1 + 2 * x**gamma / (max_gamma / 255.)) // 2)
            for x in xrange(256)
        ]
        # 254 is the end-of-frame marker on the wire; never emit it as data.
        for i, v in enumerate(self.gamma_map):
            if v == 254:
                self.gamma_map[i] = 253

    def gamma_correct(self, colour):
        """ Returns gamma-corrected colour. """
        return tuple(self.gamma_map[c] for c in colour)

    def __setitem__(self, tup, val):
        """ Allows for easy frame access. Use like:
        >>> screen[(x, y)] = r, g, b
        """
        if type(tup) not in (tuple, list) or len(tup) != 2:
            raise ValueError("tup should be a tuple of length 2")
        if type(val) not in (tuple, list) or len(val) != 3:
            raise ValueError("val should be a tuple of length 3")
        if tup[0] not in range(0, self.w) or tup[1] not in range(0, self.h):
            raise ValueError("tup should be inside the grid:", (self.w, self.h))
        self.buf[self.transform.inverse(tup)] = self.gamma_correct(val)
        # Drain bytes echoed back by the device so its buffer never fills up.
        waiting = self.tty.inWaiting()
        if waiting > 0:
            _ = self.tty.read(waiting)

    def push(self):
        """ Push the current frame contents to the screen """
        self.tty.write(''.join(
            chr(g) + chr(r) + chr(b) for r, g, b in self.buf) + chr(254))

    def load_data(self, data):
        """ Load byte array to framebuffer. Does not send anything yet. """
        for i in xrange(min(len(data) / 3, self.w * self.h)):
            x, y = i % self.w, i // self.w
            self[(x, y)] = tuple(ord(x) for x in data[i * 3:(i + 1) * 3])

    def push_data(self, data):
        """ Push byte array to the screen. """
        self.load_data(data)
        self.push()

    def load_frame(self, frame):
        """ Load three-dimensional array (rows of RGB triples) to the
        framebuffer. Does not send anything yet; see push_frame().
        """
        # Bug fix: clamp to the overlap of the frame and the screen with
        # min(); the original max() raised IndexError for frames smaller
        # than the grid and ValueError (via __setitem__) for larger ones.
        for y in xrange(min(len(frame), self.h)):
            for x in xrange(min(len(frame[y]), self.w)):
                self[(x, y)] = frame[y][x]

    def push_frame(self, frame):
        """ Push a three-dimensional array (rows of RGB triples) to the
        screen: load it into the framebuffer, then transmit.
        """
        self.load_frame(frame)
        self.push()
def __init__(self, matrix):
    Transform.__init__(self, matrix)
    # Document count = number of rows in the matrix held by the base class.
    self.document_total = len(self.matrix)
def __init__(self):
    """Place the camera at a fixed vantage point looking at the origin."""
    super(Camera, self).__init__()
    transform = Transform()
    transform.pos = vec3(-2.0, 3.0, -7.0)
    # Orientation derived from a look-at matrix aimed at the world origin.
    transform.rot = quat_cast(lookAt(transform.pos, vec3(0.0), UP))
    self.transform = transform
def __init__(self):
    # Reversed-direction variant of the base Transform.
    Transform.__init__(self, reverse = True)
def test_gps_dd_to_dms(self):
    """Decimal-degree-to-DMS conversion matches known fixtures."""
    cases = [
        (46.051401, (46.0, 3.0, 5.0436)),
        (14.506337, (14.0, 30.0, 22.8132)),
    ]
    for dd, expected in cases:
        self.assertEqual(Transform.gps_dd_to_dms(dd), expected)
def transform_block(self, stmts):
    """Run the base block transform inside a fresh CSE scope so expressions
    made available here do not leak into sibling blocks."""
    self.available_expressions.push()
    transformed = Transform.transform_block(self, stmts)
    self.available_expressions.pop()
    return transformed
def __init__(self, attrs, matrix):
    """Capture node attributes; fold the node's 'transform' attribute, if
    present, into the running matrix."""
    self.attrs = attrs
    self.matrix = matrix
    if 'transform' in self.attrs:
        spec = self.attrs['transform']
        self.matrix *= Transform().createMatrix(spec)
def setUp(self):
    # Fresh Transform instance for each test case.
    self.app = Transform()
def create_data_dictionary(self, row):
    """Converts mysql data into dictionary of neo4j properties."""
    properties = OrderedDict()
    today_date = datetime.now().strftime("%Y-%m-%d")
    # Rename/reshape the mysql row into neo4j-keyed fields.
    data = Transform.map_data(self.neo4j_mapping, row)
    for key, value in self.neo4j_properties.iteritems():
        try:
            if key == 'UpdatedDate':
                properties.update({'UpdatedDate':today_date})
            elif key == 'AnnoundedOnYear':
                # NOTE: 'Announded' spelling matches the stored schema keys.
                AnnouncedOn = data.get('AnnoundedOn')
                if AnnouncedOn:
                    year = str(AnnouncedOn.year)
                    properties.update({'AnnoundedOnYear':year})
            elif key == 'CompletedOnYear':
                CompletedOn = data.get('CompletedOn')
                if CompletedOn:
                    year = str(CompletedOn.year)
                    properties.update({'CompletedOnYear':year})
            elif key == 'CompletedOnMonth':
                CompletedOn = data.get('CompletedOn')
                if CompletedOn:
                    month = str(CompletedOn.month)
                    properties.update({'CompletedOnMonth':month})
            elif key == 'AnnoundedOnMonth':
                AnnouncedOn = data.get('AnnoundedOn')
                if AnnouncedOn:
                    month = str(AnnouncedOn.month)
                    properties.update({'AnnoundedOnMonth':month})
            elif key == 'OrganizationAcquireeID':
                # Replace the numeric org id with its permalink, and tag the
                # acquiree with its neo4j label when a primary role is known.
                org_id = data.get('OrganizationAcquireeID')
                permalink, primary_role = self.get_organization_permalink(org_id)
                if permalink and primary_role:
                    label = Transform.get_neo4j_label(primary_role)
                    properties.update({'OrganizationAcquireeID':permalink})
                    properties.update({'AcquireePrimaryRole':label})
                else:
                    properties.update({'OrganizationAcquireeID':permalink})
                    properties.update({'AcquireePrimaryRole':primary_role})
            elif key == 'OrganizationAcquirerID':
                # Same treatment for the acquiring organization.
                org_id = data.get('OrganizationAcquirerID')
                permalink, primary_role = self.get_organization_permalink(org_id)
                if permalink and primary_role:
                    label = Transform.get_neo4j_label(primary_role)
                    properties.update({'OrganizationAcquirerID':permalink})
                    properties.update({'AcquirerPrimaryRole':label})
                else:
                    properties.update({'OrganizationAcquirerID':permalink})
                    properties.update({'AcquirerPrimaryRole':primary_role})
            else:
                val = data.get(key)
                properties.update({key:val})
        except Exception as e:
            # Best-effort conversion: a bad field is logged and skipped.
            self.logg(debug_msg =
                      'Error while perparing data dictionary.',
                      info_msg = 'Function = create_data_dictionary()',
                      warning_msg = 'Data transformation to key, value pair failed.',
                      error_msg = 'Module = '+log_file,
                      critical_msg = str(e))
            continue
    return properties
def rotateZ3D(self, ang):
    """Rotate every stored point about the Z axis by ang, in place.

    Returns the mutated points list for chaining.
    """
    for idx, point in enumerate(self.points):
        self.points[idx] = Transform().rotateZ3D(point, ang)
    return self.points
def create_query_data(self, data):
    """Creates dynamic string for node properties and match condition."""
    try:
        properties = []
        first_key = []
        second_key = []
        first_label = []
        second_label = []
        for key, value in data.iteritems():
            key = Transform.to_str(key)
            value = Transform.to_str(value)
            if key in self.unique_keys:
                # Acquirer/acquiree ids become Permalink match keys and their
                # roles become node labels — none are emitted as properties.
                if key == 'OrganizationAcquirerID':
                    item = '{}:"{}"'.format('Permalink',value)
                    first_key.append(item)
                    continue
                elif key == 'OrganizationAcquireeID':
                    item = '{}:"{}"'.format('Permalink',value)
                    second_key.append(item)
                    continue
                elif key == 'AcquirerPrimaryRole':
                    first_label.append(value)
                    continue
                elif key == 'AcquireePrimaryRole':
                    second_label.append(value)
                    continue
            if key in self.to_integer and (value or value == 0):
                # Numeric properties are emitted unquoted.
                value = Transform.to_integer(value)
                item = '{}:{}'.format(key,value)
                properties.append(item)
                continue
            if value != 0 and not value:
                # Drop empty values, but keep a literal 0.
                continue
            if isinstance(value, str) and '"' in value:
                # Strip embedded quotes so the generated query stays parseable.
                value = value.replace('"','')
            item = '{}:"{}"'.format(key,value)
            properties.append(item)
        property_string = ','.join(properties)
        first_key_string = ','.join(first_key)
        second_key_string = ','.join(second_key)
        first_label_string = ','.join(first_label)
        second_label_string = ','.join(second_label)
    except Exception as e:
        # On failure: log, and implicitly return None (no else branch runs).
        self.logg(debug_msg = 'Error while perparing properties.',
                  info_msg = 'Function = create_query_data()',
                  warning_msg = 'Data to properties string failed.',
                  error_msg = 'Module = '+log_file,
                  critical_msg = str(e))
    else:
        return first_label_string, first_key_string, second_label_string, second_key_string, property_string
def translate3D(self, x, y, z):
    """Translate every stored point by (x, y, z), in place.

    Returns the mutated points list for chaining.
    """
    for idx, point in enumerate(self.points):
        self.points[idx] = Transform().translate3D(point, x, y, z)
    return self.points
U, S, V = linalg.svd(sigma) components = np.dot(np.dot(U, np.diag(1 / np.sqrt(S))), U.T) whiten = np.dot(mdata, components.T) return components, mean, whiten if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--outdir', type=str, default='data') parser.add_argument('--whitening', type=int, default=0) parser.add_argument('--norm', type=int, default=1) args = parser.parse_args() print(args) trans = Transform(args) if not os.path.exists(args.outdir): os.mkdir(args.outdir) # prepare training dataset data = np.zeros((50000, 3 * 32 * 32), dtype=np.float) labels = [] for i, data_fn in enumerate( sorted(glob.glob('cifar-10-batches-py/data_batch*'))): batch = unpickle(data_fn) data[i * 10000:(i + 1) * 10000] = batch['data'] labels.extend(batch['labels']) if args.whitening == 1: components, mean, data = preprocessing(data)
def shear3D(self, ang):
    """Shear every stored point by ang, in place.

    Returns the mutated points list for chaining.
    """
    for idx, point in enumerate(self.points):
        self.points[idx] = Transform().shear3D(point, ang)
    return self.points
def export_image(self, out_path, out_format='PNG', image_width=2000, image_height=2000):
    """Export the current map's bounding box as a single stitched image.

    Picks the smallest zoom level whose pixel extent reaches the requested
    output size, converts the bbox to tile/pixel coordinates, and delegates
    the actual stitching to export_map_tiles().
    """
    if self.map is None:
        return None
    if self.bbox.coor_system.lower() == 'gps':
        # transform to Gauss-Krueger
        gk1 = Transform.wgs84_to_gk(self.bbox.min_lat, self.bbox.min_lon, 0)  # left bottom
        gk2 = Transform.wgs84_to_gk(self.bbox.max_lat, self.bbox.max_lon, 0)  # right top
    else:
        gk1 = [self.bbox.min_lat, self.bbox.min_lon]
        gk2 = [self.bbox.max_lat, self.bbox.max_lon]
    # First zoom level whose pixel size reaches the requested dimensions;
    # fall back to the deepest level.
    zoom = len(self.map.levels) - 1
    for z in range(0, len(self.map.levels)):
        width, height = self.get_pixels_size(z, gk1, gk2)
        # print(width, height)
        if width >= image_width or height >= image_height:
            zoom = z
            break
    pixels1 = Transform.gk_to_pixel(gk1[0], gk1[1], self.map, zoom)
    pixels2 = Transform.gk_to_pixel(gk2[0], gk2[1], self.map, zoom)
    # tile range
    start_x = pixels1[0]
    start_y = pixels2[1]
    end_x = pixels2[0]
    end_y = pixels1[1]
    # padding: left, top, right, bottom
    padding = (pixels1[2], pixels2[3], self.map.tile_size_x - pixels2[2], self.map.tile_size_y - pixels1[3])
    if self.verbose:
        if self.bbox.coor_system.lower() == 'gps':
            print('Bounding box (WGS84): %f, %f, %f, %f' % (self.bbox.min_lat, self.bbox.min_lon, self.bbox.max_lat, self.bbox.max_lon))
        print('Bounding box (GK): %d, %d, %d, %d' %  # as integer !!
              (gk1[0], gk1[1], gk2[0], gk2[1]))
        print('Output image: %s (%s)' % (out_path, out_format))
        print('Max image size: %dx%d' % (image_width, image_height))
        print('Tiles directory: %s' % self.map.map_dir)
        print('Tiles range: %d, %d, %d, %d (zoom: %d)' % (start_x, start_y, end_x, end_y, zoom))
        print('Padding: %d, %d, %d, %d' % padding)
    start_time = time.time()
    self.export_map_tiles(start_x, start_y, end_x, end_y, zoom, padding, out_path, out_format, image_width, image_height)
    elapsed_time = time.time() - start_time
    if self.verbose:
        print('Done, image exported in %0.3f seconds\n' % elapsed_time)
def video_test(args):
    """Run a trained pose model over video frames and save drawn predictions.

    Loads frames via ``load_video``, runs batched inference (GPU or CPU),
    draws the predicted joints for each frame into ``tmp/`` and finally
    writes all per-frame predictions with ``save_vid_res``.

    @param args: parsed command-line namespace (gpu, param, batchsize,
                 datadir, vidfile, draw_limb, text_scale, ...).
    """
    # augmentation setting
    trans = Transform(args)

    # test data
    test_fn = args.datadir
    # test_dl = np.array([l.strip() for l in open(test_fn).readlines()])

    # load model
    if args.gpu >= 0:
        cuda.get_device(args.gpu).use()
    model = load_model(args)
    if args.gpu >= 0:
        model.to_gpu()
    else:
        model.to_cpu()

    # create output dir named after the checkpoint's epoch number
    epoch = int(re.search('epoch-([0-9]+)', args.param).groups()[0])
    result_dir = os.path.dirname(args.param)
    out_dir = '%s/test_%d' % (result_dir, epoch)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    out_log = '%s.log' % out_dir

    input_data_all, labels_all = load_video(trans, args)
    preds_all = []
    # fix: log file handle was opened and never closed
    with open(out_log, 'w') as fp:
        for i in range(0, len(input_data_all), args.batchsize):
            # fix: the CPU path previously left input_data/labels/preds
            # undefined (NameError when args.gpu < 0); mirror the handling
            # used in test()
            if args.gpu >= 0:
                input_data = cuda.to_gpu(
                    input_data_all[i:i + args.batchsize].astype(np.float32))
                labels = cuda.to_gpu(labels_all[i:i + args.batchsize].astype(
                    np.float32))
            else:
                input_data = input_data_all[i:i + args.batchsize].astype(
                    np.float32)
                labels = labels_all[i:i + args.batchsize].astype(np.float32)

            x = Variable(input_data, volatile=True)
            t = Variable(labels, volatile=True)
            model(x, t)

            if args.gpu >= 0:
                preds = cuda.to_cpu(model.pred.data)
                input_data = cuda.to_cpu(input_data)
                labels = cuda.to_cpu(labels)
            else:
                preds = model.pred.data

            for n in range(len(input_data)):  # fix: xrange is Python-2-only
                img = input_data[n].transpose((1, 2, 0))
                pred = preds[n]
                img_pred, pred = trans.revert(img, pred)

                # turn label data into image coordinates
                label = labels[n]
                img_label, label = trans.revert(img, label)

                # create pred, label tuples
                img_pred = np.array(img_pred.copy())
                pred = [tuple(p) for p in pred]

                # all limbs
                img_pred = draw_joints(img_pred, pred, args.draw_limb,
                                       args.text_scale)

                tr_fn = 'tmp/%d.png' % (i + n + 1)
                cv.imwrite(tr_fn, img_pred)

                preds_all.append(pred)

    save_vid_res(preds_all, out_dir, args.vidfile.split(".")[0])
# Directory containing this script; used to resolve template paths.
TOPDIR = os.path.dirname(os.path.abspath(__file__))


def rel(file):
    """Helper to get a file relative to the script's directory

    @param file: Relative file path.
    """
    return os.path.join(TOPDIR, file)


# Wiki space prefix prepended to generated documentation page names.
WIKI_PREFIX = 'Asterisk 18'

# One Transform per (mustache template, output path pattern) for the
# per-API generated files.
API_TRANSFORMS = [
    Transform(rel('api.wiki.mustache'),
              'doc/rest-api/%s {{name_title}} REST API.wiki' % WIKI_PREFIX),
    Transform(rel('res_ari_resource.c.mustache'), 'res/res_ari_{{c_name}}.c'),
    Transform(rel('ari_resource.h.mustache'), 'res/ari/resource_{{c_name}}.h'),
    Transform(rel('ari_resource.c.mustache'), 'res/ari/resource_{{c_name}}.c',
              overwrite=False),
]
# Transforms for the files generated once per resources definition.
# NOTE(review): this list literal continues beyond the visible chunk.
RESOURCES_TRANSFORMS = [
    Transform(rel('models.wiki.mustache'),
              'doc/rest-api/%s REST Data Models.wiki' % WIKI_PREFIX),
    Transform(rel('ari.make.mustache'), 'res/ari.make'),
    Transform(rel('ari_model_validators.h.mustache'),
              'res/ari/ari_model_validators.h'),
    Transform(rel('ari_model_validators.c.mustache'),
              'res/ari/ari_model_validators.c'),
def __init__(self):
    """Initialize the base Transform with verification disabled."""
    Transform.__init__(self, verify=False)
# if csv = True logger.info("application ran") start = time.time() app = Extract() # Command to extract data from csv via s3 bucket: # raw_data_list = app.get_data_from_bucket("transactions/20200611132822.csv") # Commands to load data from RDS: raw_data_list = app.load_a_min() # extract output from yesterday # raw_data_list = app.load_yesterdays_data() # extract output from yesterday # raw_data_list = app.load_all_data() # extract output from all time end_extract = time.time() extract_time = round(end_extract - start, 4) print(f"Extract time: {extract_time}") logger.info(f"Extract time: {extract_time}") apple = Transform() transformed_data, new_drinks, new_locations, basket = apple.transform( raw_data_list ) # raw data into transform returns transformed data and drinks dic # transformed_data, basket = apple.transform(raw_data_list) # raw data into transform returns transformed data and drinks dic end_transform = time.time() transform_time = round(end_transform - end_extract, 4) logger.info(f"Transform time: {transform_time}") print(f"Transform time: {transform_time}") appley = Load() appley.save_transaction( transformed_data) # populate RDS instance with cleaned data. appley.save_drink_menu(new_drinks) # generate drinks menu appley.save_location_menu(new_locations) # generate locations menu
def __init__(self, rename_dict):
    """Create a renaming transform.

    ``rename_dict`` maps original names to their replacements; the base
    Transform is initialized without requiring type information.
    """
    Transform.__init__(self, require_types=False)
    self.rename_dict = rename_dict
def test(args):
    """Evaluate a trained pose model on the CSV test split.

    Writes per-sample error lines to ``<out_dir>.log`` and saves paired
    prediction/label visualizations into ``<out_dir>``.

    @param args: parsed command-line namespace (datadir, gpu, param,
                 batchsize, fname_index, draw_limb, text_scale, ...).
    """
    # augmentation setting
    trans = Transform(args)

    # test data
    test_fn = '%s/test_joints.csv' % args.datadir
    # fix: input file handle was never closed
    with open(test_fn) as f:
        test_dl = np.array([l.strip() for l in f.readlines()])

    # load model
    if args.gpu >= 0:
        cuda.get_device(args.gpu).use()
    model = load_model(args)
    if args.gpu >= 0:
        model.to_gpu()
    else:
        model.to_cpu()

    # create output dir named after the checkpoint's epoch number
    epoch = int(re.search('epoch-([0-9]+)', args.param).groups()[0])
    result_dir = os.path.dirname(args.param)
    out_dir = '%s/test_%d' % (result_dir, epoch)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    out_log = '%s.log' % out_dir

    mean_error = 0.0
    N = len(test_dl)
    # fix: log file handle was opened and never closed
    with open(out_log, 'w') as fp:
        for i in range(0, N, args.batchsize):
            lines = test_dl[i:i + args.batchsize]
            input_data, labels = load_data(trans, args, lines)

            if args.gpu >= 0:
                input_data = cuda.to_gpu(input_data.astype(np.float32))
                labels = cuda.to_gpu(labels.astype(np.float32))
            else:
                input_data = input_data.astype(np.float32)
                labels = labels.astype(np.float32)

            x = Variable(input_data, volatile=True)
            t = Variable(labels, volatile=True)
            model(x, t)

            if args.gpu >= 0:
                preds = cuda.to_cpu(model.pred.data)
                input_data = cuda.to_cpu(input_data)
                labels = cuda.to_cpu(labels)
            else:
                preds = model.pred.data

            for n, line in enumerate(lines):
                img_fn = line.split(',')[args.fname_index]
                img = input_data[n].transpose((1, 2, 0))
                pred = preds[n]
                img_pred, pred = trans.revert(img, pred)

                # turn label data into image coordinates
                label = labels[n]
                img_label, label = trans.revert(img, label)

                # calc mean_error (running average of per-joint L2 error)
                error = np.linalg.norm(pred - label) / len(pred)
                mean_error += error

                # create pred, label tuples
                img_pred = np.array(img_pred.copy())
                img_label = np.array(img_label.copy())
                pred = [tuple(p) for p in pred]
                label = [tuple(p) for p in label]

                # all limbs
                img_label = draw_joints(img_label, label, args.draw_limb,
                                        args.text_scale)
                img_pred = draw_joints(img_pred, pred, args.draw_limb,
                                       args.text_scale)

                msg = '{:5}/{:5} {}\terror:{}\tmean_error:{}'.format(
                    i + n, N, img_fn, error, mean_error / (i + n + 1))
                print(msg, file=fp)
                print(msg)

                fn, ext = os.path.splitext(img_fn)
                tr_fn = '%s/%d-%d_%s_pred%s' % (out_dir, i, n, fn, ext)
                la_fn = '%s/%d-%d_%s_label%s' % (out_dir, i, n, fn, ext)
                cv.imwrite(tr_fn, img_pred)
                cv.imwrite(la_fn, img_label)
def __init__(self, mobject, **kwargs):
    """Transform a mobject into a copy of itself whose points are
    sorted in the opposite order (by descending distance from origin).
    """
    mobject.sort_points(np.linalg.norm)
    target = mobject.copy()
    target.sort_points(lambda point: -np.linalg.norm(point))
    Transform.__init__(self, mobject, target, **kwargs)
from transform import Transform
from load import Load
from log import logger

# logger = logging.getLogger(__name__)

# NOTE(review): Extract and time are referenced below but not imported in
# this chunk — presumably imported earlier in the file; verify.
if __name__ == "__main__":
    logger.info("application ran")

    # Extract stage: pull the raw transaction rows, timing the call.
    start = time.time()
    app = Extract()
    raw_data_list = app.load_data()  # extract output
    end_extract = time.time()
    extract_time = round(end_extract - start, 4)
    print(f"Extract time: {extract_time}")
    logger.info(f"Extract time: {extract_time}")

    # Transform stage: clean the raw rows and build the drinks menu data.
    apple = Transform()
    transformed_data, transformed_drink_menu_data = apple.transform(
        raw_data_list
    )  # raw data into transform returns transformed data and drinks dic
    end_transform = time.time()
    transform_time = round(end_transform - end_extract, 4)
    logger.info(f"Transform time: {transform_time}")
    print(f"Transform time: {transform_time}")

    # Load stage: persist cleaned data and the drinks menu.
    appley = Load()
    appley.save_transaction(
        transformed_data)  # populate RDS instance with cleaned data.
    appley.save_drink_menu(transformed_drink_menu_data)  # generate drinks menu
    end_load = time.time()
def post(self):
    """Handle a POST request: run the JSON payload through Transform
    and return the result as ``{"output": ...}``.
    """
    payload = request.get_json(force=True)
    results = Transform().transform(payload)
    return jsonify(output=results)
def template_main_func():
    # Instantiate the four ETL pipeline stage objects.
    # NOTE(review): the function body may continue beyond this chunk.
    extract = Extract()
    transform = Transform()
    load = Load()
    record = Record()