def placeOrder():
    with requests.Session() as c:
        try:
            print('\nGet LOGINPAGE')
            r = c.get(RequestLogin.url)
            checkStatus(r)
            BP()
            print('\nPost Anonymous')
            r = c.post(RequestAnonymous.url, headers=RequestAnonymous.header)
            checkStatus(r)
            print('\nPost Login')
            r = c.post(RequestLoginLogin.url,
                       headers=RequestLoginLogin.header,
                       data=RequestLoginLogin.data)
            checkStatus(r)
            print('\nPost Order')
            # Update header with the ntag from the earlier response.
            RequestOrder.header['ntag'] = r.headers['ntag']
            r = c.post(RequestOrder.url,
                       headers=RequestOrder.header,
                       data=RequestOrder.data)
            checkStatus(r)
        except Exception as e:
            print("ERROR in function",
                  inspect.stack()[0][3] + ': ' + str(e))
            handleError(str(e), '')

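# placeOrder() relies on a checkStatus helper that is defined elsewhere
# and not shown here. A minimal sketch of what such a helper might look
# like, assuming it only needs to fail loudly on bad HTTP status codes
# (the behavior is inferred, not taken from the original project):
def checkStatus_sketch(r):
    """ Hypothetical stand-in for checkStatus(): raise on 4xx/5xx,
    otherwise report the status code."""
    # requests.Response.raise_for_status() raises HTTPError on 4xx/5xx.
    r.raise_for_status()
    print('OK: %d' % r.status_code)
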
def channels_at_xy(x, y, shape):
    def func(tens):
        # Select all channel values at grid position (x, y), assuming a
        # channels-first tensor of shape (batch, channels, height, width).
        res = tens[:, :, x, y]
        #BP()
        return res
    BP()
    # Note: this only works if shape[1] is itself a tuple; if shape is a
    # plain tuple of ints, shape[1:] is probably what was intended.
    return kl.Lambda(func, output_shape=(1, ) + shape[1])

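# A quick numpy illustration of what the inner func selects, assuming
# the channels-first layout noted above (values are illustrative):
import numpy as np

tens = np.arange(2 * 3 * 4 * 4).reshape(2, 3, 4, 4)  # (batch, chan, h, w)
# tens[:, :, x, y] keeps one scalar per (sample, channel) pair.
print(tens[:, :, 1, 2].shape)  # (2, 3)
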
def __delitem__(self, key):
    """ Destroy constraint(s). key must be hashable."""
    try:
        # self.bbase.world.removeConstraint(self[key])
        self.bbase.remove(self[key])
    except AttributeError:
        BP()
        pass
    # Do normal dict delete.
    super(self.__class__, self).__delitem__(key)

def print_results(self, valid_input, valid_output_0, valid_output_1):
    np.set_printoptions(precision=2)
    np.set_printoptions(suppress=True)
    testpred = self.model.predict(valid_input[:1])
    BP()
    preds = self.model.predict(valid_input, batch_size=32)
    for i in range(len(preds[0])):
        tstr = 'color0: %s pred: %s || color1: %s pred: %s ' \
            % (str(valid_output_0[i]), str(preds[0][i]),
               str(valid_output_1[i]), str(preds[1][i]))
        print(tstr)

def __setitem__(self, key, val):
    """ Adds constraint."""
    # First check that the constraint is valid.
    if not isinstance(val, BulletConstraint):
        raise TypeError("Bad type: %s" % type(val))
    # Attach it to self.bbase.
    try:
        # self.bbase.world.attachConstraint(val)
        self.bbase.attach(val)
    except AttributeError:
        BP()
        pass
    # Then do normal dict add.
    super(self.__class__, self).__setitem__(key, val)

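# __setitem__ and __delitem__ above implement the same pattern: a dict
# subclass whose mutations carry a physics-engine side effect. A
# self-contained sketch of the bare pattern (class and printed text are
# illustrative, not from the original codebase):
class HookedDict(dict):
    """ Illustrative dict subclass: mutations trigger a side effect,
    standing in for attaching/removing Bullet constraints."""

    def __setitem__(self, key, val):
        print("attach %r" % (key,))  # stands in for self.bbase.attach(val)
        super(HookedDict, self).__setitem__(key, val)

    def __delitem__(self, key):
        print("remove %r" % (key,))  # stands in for self.bbase.remove(...)
        super(HookedDict, self).__delitem__(key)

# Design note: the originals call super(self.__class__, self), which
# recurses forever if the class is ever subclassed; naming the class
# explicitly, as here, avoids that pitfall.
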
def create_model(self):
    """ Initialize modelnode. GSOs should not have non-resource nodes
    parented to them because 'destroy_model' removes all descendant
    nodes with tag 'model'."""
    model_name = self.get_model()
    try:
        # Load the model from disk.
        node = self.loader.load_model(model_name)
    except NameError:
        # Probably won't enter here, but if so it needs to be debugged.
        BP()
        pass
    else:
        if node is None:
            raise LoaderError("Could not find model: %s" % model_name)
        node.setName(path(model_name).basename())
        NodePath(node).reparentTo(self)
        self.clear_materials()
        self.setTag("resource", "model")

def _compute_shapes(self):
    """ Computes shapes from self.components."""
    # Compute mass and center-of-mass.
    masses = []
    poses = []
    psos = self.descendants(depths=[1], type_=PSO)
    parent = self.getParent()
    for pso in psos:
        mass = pso.get_mass()
        pos = pso.get_pos(parent)
        if mass == 0.:
            com = pos
            break
        poses.append(pos)
        masses.append(mass)
    else:
        mass = np.sum(masses)
        com = Point3(*(np.sum(np.array(poses).T * masses, axis=-1) / mass))
    self.set_mass(mass)
    with self._preserve_child_tranforms() as parent:
        self.set_pos(parent, com)
    # Add shapes from PSOs.
    vals = []
    for pso in psos:
        shapes0 = ShapeList(pso.get_shape())
        for shape0 in shapes0:
            name = shape0.name
            args0, xform0 = shape0
            if name != "Box":
                print("Can't handle that shape: %s" % name)
                BP()
            shape = ShapeManager.make1((name, args0, xform0))
            shape.transform(pso, other=self)
            # scale = pso.get_scale(self)
            # pos = pso.get_pos(self)
            # quat = pso.get_quat(self)
            # shape.scale(scale)
            # shape.shift(pos, quat)
            val = (name, shape[0], shape[1])
            vals.append(val)
    # Set compound object's shapes tag.
    self.set_shape(vals)

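# The for/else above computes the standard mass-weighted center of mass,
# com = sum_i(m_i * p_i) / sum_i(m_i), unless a zero-mass (static)
# component pins it to that component's position. A quick numpy check of
# the weighted sum exactly as written (values are illustrative):
import numpy as np

masses = np.array([1.0, 3.0])
poses = np.array([[0.0, 0.0, 0.0],
                  [4.0, 0.0, 0.0]])
com = np.sum(poses.T * masses, axis=-1) / masses.sum()
print(com)  # [3. 0. 0.]: pulled toward the heavier body
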
def bp(self, task):
    """ Task: break."""
    BP()
    return task.done

def repel(self, n_steps=1000, thresh=10, step_size=0.01):
    """ Performs n_steps physical "repel" steps. """

    @contextmanager
    def repel_context(world):
        """ Sets up a repel context. Gets the bodies, turns off gravity,
        rescales the masses, sets up the collision notification
        callback. """

        def change_contact_thresh(bodies, thresh=0.001):
            """ Adjust the contact processing threshold. This is used to
            make the objects not trigger collisions when just barely
            touching."""
            if isinstance(thresh, Iterable):
                it = izip(bodies, thresh)
            else:
                it = ((body, thresh) for body in bodies)
            thresh0 = []
            for body, th in it:
                thresh0.append(body.getContactProcessingThreshold())
                body.setContactProcessingThreshold(th)
            return thresh0

        def rescale_masses(bodies):
            """ Rescale the masses so they are proportional to the
            volume."""
            masses, inertias = zip(*[(body.getMass(), body.getInertia())
                                     for body in bodies])
            volumefac = 1.
            for body, mass, inertia in zip(bodies, masses, inertias):
                # Compute the mass-normalized diagonal elements of the
                # inertia tensor.
                if mass > 0.:
                    it = inertia / mass * 12
                    # Calculate volume from the mass-normalized
                    # inertia tensor (from wikipedia).
                    h = sqrt((it[0] - it[1] + it[2]) / 2)
                    w = sqrt(it[2] - h ** 2)
                    d = sqrt(it[1] - w ** 2)
                    volume = h * w * d
                    # Change the mass.
                    body.setMass(volume * volumefac)
            return masses

        # Get the bodies.
        bodies = world.getRigidBodies()
        # Turn gravity off.
        gravity = world.getGravity()
        world.setGravity(Vec3.zero())
        # Tighten the contact processing threshold slightly.
        delta = -0.001
        cp_thresh = change_contact_thresh(bodies, thresh=delta)
        # Adjust masses.
        masses = rescale_masses(bodies)
        # Adjust sleep thresholds.
        deactivations = [b.isDeactivationEnabled() for b in bodies]
        for body in bodies:
            body.setDeactivationEnabled(False)
        # Zero out velocities.
        self.attenuate_velocities(bodies)
        # Collisions monitor.
        collisions = CollisionMonitor(world)
        collisions.push_notifiers(bodies)
        ## Finish __enter__.
        yield bodies, collisions
        ## Start __exit__.
        collisions.pop_notifiers()
        # Zero out velocities.
        self.attenuate_velocities(bodies)
        # Restore the contact processing threshold.
        change_contact_thresh(bodies, thresh=cp_thresh)
        # Set masses back.
        for body, mass in zip(bodies, masses):
            body.setMass(mass)
        # Turn gravity back on.
        world.setGravity(gravity)
        for body, d in zip(bodies, deactivations):
            body.setDeactivationEnabled(d)

    # Operate in a context that changes the masses, turns off
    # gravity, adds collision monitoring callback, etc.
    with repel_context(self.world) as (bodies, collisions):
        # Loop through the repel simulation.
        done_count = 0
        for istep in xrange(n_steps):
            # Take one step.
            self.world.doPhysics(step_size, 1, step_size)
            # HACK: The following can be removed once Panda3d 1.9
            # is installed (and the method can be removed from
            # CollisionMonitor).
            collisions.detect18()
            # Increment done_count, only if there are no contacts.
            if collisions:
                done_count = 0
            else:
                done_count += 1
            if any(body.getMass() > 0. and not body.isActive()
                   for body in bodies):
                BP()
            # Stop criterion.
            if done_count >= thresh:
                break
            # Zero-out/re-scale velocities.
            linvelfac = bool(collisions) * 0.001
            angvelfac = bool(collisions) * 0.001
            self.attenuate_velocities(bodies, linvelfac, angvelfac)
            # Reset collisions.
            collisions.reset()
    return istep

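# The h/w/d recovery in rescale_masses inverts the standard solid-box
# inertia tensor: for a box of dimensions (a, b, c) and mass m, the
# diagonal is I = m/12 * (b^2+c^2, a^2+c^2, a^2+b^2). A standalone check
# of that algebra with illustrative dimensions:
from math import sqrt

a, b, c = 1.0, 2.0, 3.0
it = [b * b + c * c, a * a + c * c, a * a + b * b]  # I/m * 12
h = sqrt((it[0] - it[1] + it[2]) / 2)  # recovers b
w = sqrt(it[2] - h ** 2)               # recovers a
d = sqrt(it[1] - w ** 2)               # recovers c
print(h, w, d, h * w * d)              # 2.0 1.0 3.0, volume 6.0 == a*b*c
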
def main():
    if len(sys.argv) == 1:
        usage(True)
    global GRIDSIZE, RESOLUTION
    parser = argparse.ArgumentParser(usage=usage())
    parser.add_argument("--gridsize", required=True, type=int)
    parser.add_argument("--epochs", required=False, default=10, type=int)
    parser.add_argument("--rate", required=False, default=0, type=float)
    parser.add_argument("--visualize", required=False, action='store_true')
    args = parser.parse_args()
    GRIDSIZE = args.gridsize
    RESOLUTION = GRIDSIZE * 2 * 2 * 2 * 2
    model = GCountModel(RESOLUTION, GRIDSIZE, BATCH_SIZE, args.rate)
    if args.visualize or not args.epochs:
        if os.path.exists(WEIGHTSFILE):
            print('Loading weights from file %s...' % WEIGHTSFILE)
            model.model.load_weights(WEIGHTSFILE)
    else:
        if os.path.exists(MODELFILE):
            print('Loading model from file %s...' % MODELFILE)
            model.model = km.load_model(MODELFILE, custom_objects={"th": th})
        if args.rate:
            model.model.optimizer.lr.set_value(args.rate)
    print('Reading data...')
    images = ut.get_data(SCRIPTPATH, (RESOLUTION, RESOLUTION))
    output = ut.get_output_by_key(SCRIPTPATH, 'stones')

    #-----------------------------------------------------------
    # Reshape targets to look like the flattened network output
    tt = output['valid_output']
    valid_output = np.array([[
        x.tolist().count(EMPTY),
        x.tolist().count(WHITE),
        x.tolist().count(BLACK)
    ] for x in tt])
    tt = output['train_output']
    train_output = np.array([[
        x.tolist().count(EMPTY),
        x.tolist().count(WHITE),
        x.tolist().count(BLACK)
    ] for x in tt])

    means, stds = ut.get_means_and_stds(images['train_data'])
    ut.normalize(images['train_data'], means, stds)
    ut.normalize(images['valid_data'], means, stds)

    # Visualization
    #-----------------
    if args.visualize:
        print('Dumping conv layer images to jpg')
        visualize_channels(model.model, 'lastconv', range(0, 3),
                           images['train_data'][700:701], 'lastconv0.jpg')
        visualize_channels(model.model, 'lastconv', range(0, 3),
                           images['train_data'][500:501], 'lastconv1.jpg')
        visualize_channels(model.model, 'lastconv', range(0, 3),
                           images['train_data'][400:401], 'lastconv2.jpg')
        visualize_channels(model.model, 'lastconv', range(0, 3),
                           images['train_data'][300:301], 'lastconv3.jpg')
        visualize_channels(model.model, 'lastconv', range(0, 3),
                           images['train_data'][200:201], 'lastconv4.jpg')
        exit(0)

    # If no epochs, just print output and what it should have been
    if not args.epochs:
        idx = 0
        print('lastconv')
        xx = ut.get_output_of_layer(model.model, 'lastconv',
                                    images['train_data'][idx:idx + 1])
        print(xx)
        print('count_e')
        xx = ut.get_output_of_layer(model.model, 'count_e',
                                    images['train_data'][idx:idx + 1])
        print(xx)
        print('count_w')
        xx = ut.get_output_of_layer(model.model, 'count_w',
                                    images['train_data'][idx:idx + 1])
        print(xx)
        print('count_b')
        xx = ut.get_output_of_layer(model.model, 'count_b',
                                    images['train_data'][idx:idx + 1])
        print(xx)
        print('out')
        xx = model.model.predict(images['train_data'][idx:idx + 1],
                                 batch_size=1)
        print(xx)
        print('target')
        print(train_output[idx:idx + 1])
        BP()

    # Train
    if args.epochs:
        print('Start training...')
        model.train(images['train_data'], train_output,
                    images['valid_data'], valid_output,
                    BATCH_SIZE, args.epochs)
        model.model.save_weights(WEIGHTSFILE)
        model.model.save(MODELFILE)

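# The target vectors built above are just per-color stone counts over a
# board. A toy illustration of the construction (the EMPTY/WHITE/BLACK
# values here are illustrative; the real constants live elsewhere in the
# project):
import numpy as np

EMPTY, WHITE, BLACK = 0, 1, 2
board = np.array([0, 1, 1, 2, 0, 0])
target = [board.tolist().count(EMPTY),
          board.tolist().count(WHITE),
          board.tolist().count(BLACK)]
print(target)  # [3, 2, 1]: one count per color, matching the 3-way head
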
def main():
    if len(sys.argv) == 1:
        usage(True)
    global GRIDSIZE, RESOLUTION
    parser = argparse.ArgumentParser(usage=usage())
    parser.add_argument("--gridsize", required=True, type=int)
    parser.add_argument("--epochs", required=False, default=10, type=int)
    parser.add_argument("--rate", required=False, default=0, type=float)
    parser.add_argument("--visualize", required=False, action='store_true')
    args = parser.parse_args()
    GRIDSIZE = args.gridsize
    RESOLUTION = GRIDSIZE * 2 * 2 * 2
    model = LambdaModel(RESOLUTION, GRIDSIZE, args.rate)
    if args.visualize or not args.epochs:
        if os.path.exists(WEIGHTSFILE):
            print('Loading weights from file %s...' % WEIGHTSFILE)
            model.model.load_weights(WEIGHTSFILE)
    else:
        if os.path.exists(MODELFILE):
            print('Loading model from file %s...' % MODELFILE)
            model.model = km.load_model(MODELFILE)
        if args.rate:
            model.model.optimizer.lr.set_value(args.rate)
    print('Reading data...')
    images = ut.get_data(SCRIPTPATH, (RESOLUTION, RESOLUTION))
    output = ut.get_output_by_key(SCRIPTPATH, 'stones')

    # Debug
    #----------------------------------------------------
    #last_conv_model = km.Model(inputs=model.model.input,
    #                           outputs=model.model.get_layer('lastconv').output)
    #tt = last_conv_model.predict(images['valid_data'][:1])
    #xx = model.model.predict(images['valid_data'][:1])
    #BP()

    #-----------------------------------------------------------
    # Reshape targets to look like the flattened network output
    tt = output['valid_output']
    valid_output = np.array([
        np.transpose(ut.onehot(x, NCOLORS)).reshape(GRIDSIZE * GRIDSIZE * 3)
        for x in tt
    ])
    tt = output['train_output']
    train_output = np.array([
        np.transpose(ut.onehot(x, NCOLORS)).reshape(GRIDSIZE * GRIDSIZE * 3)
        for x in tt
    ])

    means, stds = ut.get_means_and_stds(images['train_data'])
    ut.normalize(images['train_data'], means, stds)
    ut.normalize(images['valid_data'], means, stds)
    fname = output['train_filenames'][0]
    #tt = get_output_of_layer(model.model, 'lastconv', images['train_data'][:1])

    if not args.epochs:
        idx = 0
        xx = get_output_of_layer(model.model, 'out',
                                 images['train_data'][idx:idx + 1])
        print(xx)
        print(train_output[idx:idx + 1])
        BP()

    if args.visualize:
        print('Dumping conv layer images to jpg')
        # 'output' (not the undefined 'meta' in the original) holds the
        # filename lists.
        visualize(model, 'classconv', images['train_data'],
                  ['train/' + x for x in output['train_filenames']])
        exit(0)

    # Train
    if args.epochs:
        print('Start training...')
        model.train(images['train_data'], train_output,
                    images['valid_data'], valid_output,
                    BATCH_SIZE, args.epochs)
        model.model.save_weights(WEIGHTSFILE)
        model.model.save(MODELFILE)

def main():
    if len(sys.argv) == 1:
        usage(True)
    global GRIDSIZE, RESOLUTION
    parser = argparse.ArgumentParser(usage=usage())
    parser.add_argument("--gridsize", required=True, type=int)
    parser.add_argument("--epochs", required=False, default=10, type=int)
    parser.add_argument("--rate", required=False, default=0, type=float)
    parser.add_argument("--visualize", required=False, action='store_true')
    args = parser.parse_args()
    GRIDSIZE = args.gridsize
    RESOLUTION = GRIDSIZE * 2 * 2 * 2 * 2
    model = GoogleModel(RESOLUTION, GRIDSIZE, args.rate)
    if args.visualize or not args.epochs:
        if os.path.exists(WEIGHTSFILE):
            print('Loading weights from file %s...' % WEIGHTSFILE)
            model.model.load_weights(WEIGHTSFILE)
    else:
        if os.path.exists(MODELFILE):
            print('Loading model from file %s...' % MODELFILE)
            model.model = km.load_model(MODELFILE)
        if args.rate:
            model.model.optimizer.lr.set_value(args.rate)
    print('Reading data...')
    images = ut.get_data(SCRIPTPATH, (RESOLUTION, RESOLUTION))
    output = ut.get_output_by_key(SCRIPTPATH, 'stones')

    #-----------------------------------------------------------
    # Reshape targets to look like the flattened network output
    tt = output['valid_output']
    valid_output = np.array([
        np.transpose(ut.onehot(x, NCOLORS)).reshape(GRIDSIZE * GRIDSIZE * 3)
        for x in tt
    ])
    tt = output['train_output']
    train_output = np.array([
        np.transpose(ut.onehot(x, NCOLORS)).reshape(GRIDSIZE * GRIDSIZE * 3)
        for x in tt
    ])

    means, stds = ut.get_means_and_stds(images['train_data'])
    ut.normalize(images['train_data'], means, stds)
    ut.normalize(images['valid_data'], means, stds)

    # Visualization
    #-----------------
    if args.visualize:
        print('Dumping conv layer images to jpg')
        visualize_channels(model.model, 'lastconv', range(0, 3),
                           images['valid_data'][42:43], 'lastconv.jpg')
        exit(0)

    # If no epochs, just print output and what it should have been
    if not args.epochs:
        idx = 0
        xx = ut.get_output_of_layer(model.model, 'out',
                                    images['train_data'][idx:idx + 1])
        print(xx)
        print(train_output[idx:idx + 1])
        BP()

    # Train
    if args.epochs:
        print('Start training...')
        model.train(images['train_data'], train_output,
                    images['valid_data'], valid_output,
                    BATCH_SIZE, args.epochs)
        model.model.save_weights(WEIGHTSFILE)
        model.model.save(MODELFILE)

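# Both this main() and the previous one build targets the same way: a
# per-cell one-hot matrix, transposed to color-major order and
# flattened. A standalone sketch, with np.eye standing in for ut.onehot
# (assumed to map a length-N label vector to an (N, NCOLORS) indicator
# matrix; toy sizes throughout):
import numpy as np

NCOLORS, GRIDSIZE = 3, 2
labels = np.array([0, 2, 1, 0])       # one class per grid cell
onehot = np.eye(NCOLORS)[labels]      # (GRIDSIZE*GRIDSIZE, NCOLORS)
# Transpose to (NCOLORS, cells), then flatten: all cell scores for
# color 0 first, then color 1, then color 2.
flat = np.transpose(onehot).reshape(GRIDSIZE * GRIDSIZE * NCOLORS)
print(flat)  # [1. 0. 0. 1. 0. 0. 1. 0. 0. 1. 0. 0.]
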
def run():
    """ Wrap all functionality in run() function to handle exceptions
    without going to Blender."""

    def is_int(s):
        """ Returns bool indicating whether s can be converted into an
        int."""
        try:
            int(s)
        except ValueError:
            x = False
        else:
            x = True
        return x

    def parse_scenes(S):
        if S:
            # Convert each to either a string or an int.
            X = int(S) if is_int(S) else S
        else:
            X = None
        return X

    # Get first Python argument index: idx.
    try:
        idx = sys.argv.index("--") + 1
    except ValueError:
        # "--" isn't an argument, start with next argument after this
        # script. Determine which argument this script is.
        thisfile = os.path.basename(inspect.getfile(inspect.currentframe()))
        idx = None
        for i, a in enumerate(sys.argv):
            if os.path.basename(a) == thisfile:
                idx = i + 1
        if idx is None:
            raise argparse.ArgumentError("Cannot split argument list.")
    # The Python script's arguments.
    args = sys.argv[idx:]
    # Description for parser.
    try:
        description = __doc__
    except NameError:
        description = ""
    # Parser object.
    parser = argparse.ArgumentParser(description=description)
    # Argument: f_anim.
    parser.add_argument(
        "--render-anim", "-a", action="store_true", default=False,
        help="Render frames from start to end.")
    # Argument: device.
    devices = gpu_devices + ("CPU",)
    parser.add_argument(
        "--device", "-D", choices=devices,
        help="Sets compute device: (%s)" % (", ".join(devices)))
    # Argument: samples.
    parser.add_argument(
        "--samples", default=None, type=int,
        help="Sets number of samples per render.")
    # Argument: scenes.
    parser.add_argument(
        "--scenes", nargs="*", default=[], type=parse_scenes,
        help="List of scene names or indices to render. Defaults to none.")
    # Argument: frame.
    parser.add_argument(
        "--render-frame", "-f", default=None, type=int,
        help="Sets frame to render.")
    # Argument: frame start.
    parser.add_argument(
        "--frame-start", "-s", default=None, type=int,
        help="Sets start frame.")
    # Argument: frame end.
    parser.add_argument(
        "--frame-end", "-e", default=None, type=int,
        help="Sets end frame.")
    # Argument: frame jump.
    parser.add_argument(
        "--frame-jump", "-j", default=None, type=int,
        help="Sets number of frames to step forward after each rendered "
        "frame.")
    # Argument: output.
    parser.add_argument(
        "--render-output", "-o", default=None,
        help="Set the render path and file name.")
    # Argument: f_no_kill.
    parser.add_argument(
        "--no-kill", action="store_true", default=False,
        help="Will not kill Blender at the end of this script.")
    # Parse args.
    try:
        parsed, remaining = parser.parse_known_args(args)
    except SystemExit as err:
        if not err.code:
            # Normal exit occurred, probably from "--help". Just kill
            # Blender now, because otherwise it will continue running
            # (and probably print Blender's help to stdout).
            kill_blender()
        else:
            # Abnormal exit. Re-raise error.
            raise err
        # Exit script and exit Blender.
        sys.exit(err.code)
        # NOTE: unreachable after the sys.exit() above.
        bpy.ops.wm.quit_blender()
        BP()
        kill_blender()
    # Get parsed arguments.
    f_anim = parsed.render_anim
    device_t = parsed.device
    samples = parsed.samples
    scenes = [None if str(s).lower() in ("end", "none") else s
              for s in parsed.scenes]
    frame = parsed.render_frame
    start = parsed.frame_start
    end = parsed.frame_end
    jump = parsed.frame_jump
    output = parsed.render_output
    f_kill = not parsed.no_kill
    if bpy:
        # Render.
        blender_run(f_anim, device_t=device_t, samples=samples,
                    scenes=scenes, frame=frame, start=start, end=end,
                    jump=jump, output=output)
    else:
        print("** Called from outside Blender. Exiting. **")
    # Kill Blender. This script is intended to be used as the final
    # command line argument to Blender, because there's really no good
    # way to consume the Python arguments and then return to take more
    # Blender arguments. The command line arg "--no-kill" can be used
    # to avoid this.
    if f_kill:
        kill_blender()

if __name__ == "__main__":
    ## Cmd line interface.
    try:
        run()
    except:
        print("\n\nError generated from Python script.")
        BP()
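
# run() above hinges on one Blender convention: everything after a
# literal "--" on the command line is ignored by Blender and left for
# the script. A standalone sketch of that split (the argv values are
# illustrative, e.g. blender -b scene.blend -P render.py -- --render-frame 7):
argv = ["blender", "-b", "scene.blend", "-P", "render.py",
        "--", "--render-frame", "7"]
try:
    script_args = argv[argv.index("--") + 1:]
except ValueError:
    script_args = []           # no "--" present: nothing for the script
print(script_args)             # ['--render-frame', '7']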