def test_001_transform_constructor():
    try:
        a = t.Transform()
    except AssertionError:
        return
    else:
        assert False  # should have died
def _startElement(self, name, attrs):
    nodeName = attrs.get('name', None)

    if (name == 'transform'):
        t = transform.Transform()
        t.takeParser(self._parser, self, attrs)
    elif (name == 'geom'):
        g = geom.Geom(nodeName, self)
        g.takeParser(self._parser)
    elif (name == 'group'):
        # parse group
        pass
    elif (name == 'body'):
        b = body.Body(nodeName, self, attrs)
        b.takeParser(self._parser)
    elif (name == 'jointgroup'):
        # parse joint group
        pass
    elif (name == 'joint'):
        j = joint.Joint(nodeName, self)
        j.takeParser(self._parser)
    elif (name == 'ext'):
        # parse ext
        pass
    else:
        raise errors.ChildError('space', name)
def transform_imgbox(self):
    '''
    annotation: 1/img_01 x1 y1 x2 y2 x1 y1 x2 y2 ...
    '''
    auger_list = ["Sequential", "Fliplr", "Affine", "Dropout",
                  "AdditiveGaussianNoise", "SigmoidContrast", "Multiply"]
    trans = transform.Transform(img_auger_list=auger_list)
    img_dict = dict()
    if self.img_org is None:
        print("aug img is None")
        return None
    img_aug, boxes_aug, keep_idx = trans.aug_img_boxes(
        self.img_org, [self.boxes.tolist()])
    if not len(boxes_aug) > 0:
        #print("aug box is None")
        return None
    img_data = img_aug[0]
    boxes_trans = np.array(boxes_aug[0], dtype=np.int32).reshape(-1, 4)
    label = np.array(self.labels[keep_idx[1][0]], dtype=np.int32).reshape(-1, 1)
    #label = np.ones([boxes_trans.shape[0],1],dtype=np.int32)*NAME_LABEL_MAP['face']
    #print('box',boxes_trans.shape)
    #print('label',np.shape(label))
    gt_box_labels = np.concatenate((boxes_trans, label), axis=1)
    num_objects_one_img = gt_box_labels.shape[0]
    #gt_box_labels = np.reshape(gt_box_labels,-1)
    #gt_list = gt_box_labels.tolist()
    img_dict['img_data'] = img_data
    img_dict['img_shape'] = img_data.shape[:2]
    img_dict['gt'] = gt_box_labels  #gt_list
    img_dict['img_name'] = self.img_prefix + '_aug' + self.img_format
    img_dict['num_objects'] = num_objects_one_img
    return img_dict
def protobuf_from_checkpoint(ckpt_file, image_shape, batch_size, output_name):
    #if not os.path.isfile(ckpt_file):
    #    raise ValueError(f'File "{ckpt_file}" does not exist or is not a file.')

    # create the tf Session
    sess = tf.Session()

    # Compute the shape of the input placeholder. This shape is what the serialized model can
    # process. For other input shapes you will have to resize the images or make a new model export
    batch_shape = [batch_size] + image_shape
    img_placeholder = tf.placeholder(tf.float32, shape=batch_shape, name='img_placeholder')

    # create the network so the variables are in the global scope
    preds = transform.Transform().net(img_placeholder)  # noqa

    saver = tf.train.Saver()

    # load our checkpoint into the variables
    saver.restore(sess, ckpt_file)

    # get the tf graph and retrieve operation names
    graph = tf.get_default_graph()
    op_names = [op.name for op in graph.get_operations()]

    # convert the protobuf GraphDef to a GraphDef that has no variables but just constants with the
    # current values.
    output_graph_def = graph_util.convert_variables_to_constants(
        sess, graph.as_graph_def(), op_names)

    # dump GraphDef to file
    graph_io.write_graph(output_graph_def, './', output_name, as_text=False)
    sess.close()
def run_pipeline(self):
    print("Running Pipeline")
    ingest_process = ingest.Ingest()
    ingest_process.ingest_data()
    transform_process = transform.Transform()
    transform_process.transform_data()
    persist_process = persist.Persist()
    persist_process.persist_data()
def load_transforms(path):
    sexp = None
    with open(path, 'rb') as fd:
        parser = SchemeParser()
        sexp = parser.parse(fd)
    while sexp is not None:
        rule = transform.Transform(sexp.car)
        RULES[rule.name] = rule
        sexp = sexp.cdr
def __init__(self, pixels, context):
    super(Figure3D, self).__init__(pixels, {})
    self.context = context
    self._transform = transform.Transform()
    self._points = []
    self._transformed_points = []
    self._make_points()
def __init__(self, shape=None):
    # image transform network
    self.transform = transform.Transform()

    # open session
    soft_config = tf.ConfigProto(allow_soft_placement=True)
    soft_config.gpu_options.allow_growth = True  # to deal with large image
    self.sess = tf.Session(config=soft_config)

    self.shape = shape
    self._build_graph()
    self.sess.run(tf.global_variables_initializer())
    self.saver = tf.train.Saver()
def run_pipeline(self):
    print("Running Pipeline")
    ingest_process = ingest.Ingest(self.spark)
    df = ingest_process.ingest_data()
    df.show()

    transform_process = transform.Transform(self.spark)
    transformed_df = transform_process.transform_data(df)
    transformed_df.show()

    persist_process = persist.Persist(self.spark)
    persist_process.persist_data(transformed_df)
    return
def run_pipeline(self):
    logging.info('run_pipeline method started')
    ingest_process = ingest.Ingest(self.spark)
    df = ingest_process.ingest_data()
    df.show()

    transform_process = transform.Transform(self.spark)
    transformed_df = transform_process.transform_data(df)
    transformed_df.show()

    persist_process = persist.Persist(self.spark)
    persist_process.persist_data(transformed_df)
    logging.info('run_pipeline method ended')
    return
def __init__(self, name: str, scene: G.Scene = None):
    self.__components = set()
    self.__new_components = set()
    self.__started = False
    self.name = name
    self.transform = self.add_component(transform.Transform())
    self.transform.transform = self.transform
    self.__dead = False
    scene = scene or G.SCENE
    scene.add_gameobject(self)
def __init__(self, session, content_image, model_path):
    # Initialise the TF Session
    self.sess = session

    # Inputs
    self.x0 = content_image
    self.model_path = model_path

    # Image Transform
    self.transform = transform.Transform()

    self._build_graph()
def _startElement(self, name, attrs):
    nodeName = attrs.get('name', None)

    if (name == 'transform'):
        t = transform.Transform()
        t.takeParser(self._parser, self, attrs)
    elif (name == 'space'):
        space = Space(nodeName, self)
        space.takeParser(self._parser)
    elif (name == 'ext'):
        pass
    else:
        raise errors.ChildError('world', name)
def _startElement(self, name, attrs):
    nodeName = attrs.get('name', None)

    if (name == 'transform'):
        t = transform.Transform()
        t.takeParser(self._parser, self, attrs)
    else:
        self._applyTransform()

    if (name == 'torque'):
        self.getODEObject().setTorque(self._parser.parseVector(attrs))
    elif (name == 'force'):
        self.getODEObject().setForce(self._parser.parseVector(attrs))
    elif (name == 'finiteRotation'):
        mode = int(attrs['mode'])
        try:
            axis = (float(attrs['xaxis']), float(attrs['yaxis']),
                    float(attrs['zaxis']))
        except KeyError:
            raise errors.InvalidError('finiteRotation element must have'
                                      ' xaxis, yaxis and zaxis attributes')
        if (mode not in [0, 1]):
            raise errors.InvalidError('finiteRotation mode attribute must'
                                      ' be either 0 or 1.')
        self.getODEObject().setFiniteRotationMode(mode)
        self.getODEObject().setFiniteRotationAxis(axis)
    elif (name == 'linearVel'):
        self.getODEObject().setLinearVel(self._parser.parseVector(attrs))
    elif (name == 'angularVel'):
        self.getODEObject().setAngularVel(self._parser.parseVector(attrs))
    elif (name == 'mass'):
        self._mass = Mass(nodeName, self)
        self._mass.takeParser(self._parser)
    elif (name == 'joint'):
        j = joint.Joint(nodeName, self)
        j.takeParser(self._parser)
    elif (name == 'body'):
        b = Body(nodeName, self, attrs)
        b.takeParser(self._parser)
    elif (name == 'geom'):
        g = geom.Geom(nodeName, self)
        g.takeParser(self._parser)
    elif (name == 'transform'):
        # so it doesn't raise ChildError
        pass
    else:
        raise errors.ChildError('body', name)
def __init__(self, session, content_image, model_path):
    # session
    self.sess = session

    # input images
    self.x0 = content_image

    # input model
    self.model_path = model_path

    # image transform network
    self.transform = transform.Transform()

    # build graph for style transfer
    self._build_graph()
def predict(self, request: Dict) -> Dict:
    if not self.__loaded:
        self.load_model()
    if 'ndarray' not in request:
        logging.info("request json error!")
        return "request json error!"
    if self.__model:
        transform_obj = transform.Transform()
        transform_obj.transform_input(request['ndarray'])
        result_dict = transform_obj.transform_output(self.__model)
        result_dict['version'] = self.__version
        return result_dict
    else:
        return None
def predict(self, X, features_names=None):
    """
    Return a prediction.

    Parameters
    ----------
    X : array-like
    feature_names : array of feature names (optional)
    """
    if not self.loaded:
        self.load()
    if self.model:
        transform_obj = transform.Transform()
        transform_obj.transform_input(X)
        return transform_obj.transform_output(self.model)
    else:
        return "less is more more more more"
def align_motif(ref_bp_state, motif_end, motif, sterics=1):
    """
    This is the workhorse of the entire suite. Aligns one end of a motif to
    the reference frame and origin of a Basepair object.

    :param ref_bp: the base pair that the motif end is going to align to
    :param motif_end: the motif end basepair to overlay with the ref_bp
    :param motif: the motif object that you want to align

    :type ref_bp: Basepair object
    :type motif_end: Basepair object
    :type motif: Motif object
    """
    r1, r2 = ref_bp_state.r, motif_end.state().r
    r = util.unitarize(r1.T.dot(r2))
    trans = -motif_end.state().d
    t = transform.Transform(r, trans)
    motif.transform(t)
    bp_pos_diff = ref_bp_state.d - motif_end.state().d
    motif.move(bp_pos_diff)

    # alignment is by center of basepair; it can be slightly improved by
    # aligning the c1' sugars
    res1_coord, res2_coord = motif_end.c1_prime_coords()
    ref_res1_coord, ref_res2_coord = ref_bp_state.sugars

    dist1 = util.distance(res1_coord, ref_res1_coord)
    dist2 = util.distance(res2_coord, ref_res1_coord)

    if dist1 < dist2:
        sugar_diff_1 = ref_res1_coord - res1_coord
        sugar_diff_2 = ref_res2_coord - res2_coord
    else:
        sugar_diff_1 = ref_res1_coord - res2_coord
        sugar_diff_2 = ref_res2_coord - res1_coord

    if dist1 < 5 or dist2 < 5:
        motif.move((sugar_diff_1 + sugar_diff_2) / 2)

    if sterics:
        motif.get_beads([motif_end])
def run_pipeline(self):
    try:
        logging.info('run_pipeline method started')
        ingest_process = ingest.Ingest(self.spark)
        df = ingest_process.ingest_data()
        df.show()

        transform_process = transform.Transform(self.spark)
        transformed_df = transform_process.transform_data(df)
        transformed_df.show()

        persist_process = persist.Persist(self.spark)
        persist_process.persist_data(transformed_df)
        logging.info('run_pipeline method ended')
    except Exception as exp:
        logging.error("An error occurred while running the pipeline > " + str(exp))
        # send email notification
        # log error to database
        sys.exit(1)
    return
def __init__(self, name, parent):
    """
    Initialises this node. If the parent is not C{None}, parent.addChild()
    is called.

    @param name: The name of this container or C{None} if there is none.
    @type name: str
    @param parent: The parent of this node or C{None}.
    @type parent: instance or C{None}
    """
    self._name = name
    self._parent = parent
    self._obj = None
    self._transform = transform.Transform()
    self._childs = []
    self._namedChild = {}

    if (self._parent is not None):
        self._parent.addChild(self, name)
def _startElement(self, name, attrs):
    nodeName = attrs.get('name', None)

    if (name == 'transform'):
        t = transform.Transform()
        t.takeParser(self._parser, self, attrs)
        self._transformed = True
    elif (name == 'box'):
        self._parseGeomBox(attrs)
    elif (name == 'cappedCylinder'):
        self._parseGeomCCylinder(attrs)
    elif (name == 'cone'):
        raise NotImplementedError()
    elif (name == 'cylinder'):
        raise NotImplementedError()
    elif (name == 'plane'):
        self._parseGeomPlane(attrs)
    elif (name == 'ray'):
        self._parseGeomRay(attrs)
    elif (name == 'sphere'):
        self._parseGeomSphere(attrs)
    elif (name == 'trimesh'):
        self._parseTriMesh(attrs)
    elif (name == 'geom'):
        g = Geom(nodeName, self)
        g.takeParser(self._parser)
    elif (name == 'body'):
        b = body.Body(nodeName, self, attrs)
        b.takeParser(self._parser)
    elif (name == 'joint'):
        j = joint.Joint(nodeName, self)
        j.takeParser(self._parser)
    elif (name == 'jointgroup'):
        pass
    elif (name == 'ext'):
        pass
    else:
        raise errors.ChildError('geom', name)
def __init__(self, session, content_layer_ids, style_layer_ids, content_images,
             style_image, net, num_epochs, batch_size, content_weight,
             style_weight, tv_weight, learn_rate, save_path, check_period,
             max_size):
    self.sess = session
    self.net = net

    # sort layers info
    self.CONTENT_LAYERS = collections.OrderedDict(
        sorted(content_layer_ids.items()))
    self.STYLE_LAYERS = collections.OrderedDict(
        sorted(style_layer_ids.items()))

    # input images
    self.x_list = content_images
    mod = len(content_images) % batch_size
    if mod:
        # drop the remainder so the image list divides evenly into batches
        self.x_list = self.x_list[:-mod]
    self.content_size = len(self.x_list)
    self.y_s0 = style_image

    # parameters for optimization
    self.num_epochs = num_epochs
    self.content_weight = content_weight
    self.style_weight = style_weight
    self.tv_weight = tv_weight
    self.learn_rate = learn_rate
    self.batch_size = batch_size
    self.check_period = check_period

    # path for model to be saved
    self.save_path = save_path

    # image transform network
    self.transform = transform.Transform()

    # build graph for style transfer
    self._build_graph()
def transform_img(self):
    '''
    annotation: 1/img_01 0 ...
    '''
    auger_list = [
        "Sequential", "Fliplr", "AdditiveGaussianNoise", "SigmoidContrast",
        "Multiply"
    ]
    trans = transform.Transform(img_auger_list=auger_list,
                                class_num=cfgs.CLS_NUM)
    img_dict = dict()
    if self.img_org is None:
        print("aug img is None")
        return None
    img_aug = trans.aug_img(self.img_org)
    if not len(img_aug) > 0:
        #print("aug box is None")
        return None
    img_data = img_aug[0]
    img_dict['img_data'] = img_data
    img_dict['img_shape'] = img_data.shape[:2]
    img_dict['gt'] = self.label
    img_dict['img_name'] = self.img_prefix[:-4] + '_aug' + self.img_format
    return img_dict
def main():
    parser = argparse.ArgumentParser(description='Train Deblur Network')
    parser.add_argument('--seed', '-s', type=int, default=0,
                        help='seed for random values')
    parser.add_argument('--batchsize', '-b', type=int, default=128,
                        help='Number of images in each mini-batch')
    parser.add_argument('--learnrate', '-l', type=float, default=0.1,
                        help='Learning rate for SGD')
    parser.add_argument('--epoch', '-e', type=int, default=50,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--gpu', '-g', type=int, default=0,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--out', '-o', default='result',
                        help='Directory to output the result')
    parser.add_argument('--resume', '-r', default='',
                        help='Resume the training from snapshot')
    args = parser.parse_args()

    print('GPU: {}'.format(args.gpu))
    print('# Minibatch-size: {}'.format(args.batchsize))
    print('# epoch: {}'.format(args.epoch))
    print(args)
    print('')

    set_random_seed(args.seed)

    predictor = srcnn.create_srcnn()
    model = L.Classifier(predictor, lossfun=F.mean_squared_error, accfun=psnr)
    if args.gpu >= 0:
        # Make a specified GPU current
        chainer.backends.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()  # Copy the model to the GPU

    optimizer = chainer.optimizers.MomentumSGD(args.learnrate)
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer.WeightDecay(5e-4))

    base_dir = 'data/blurred_sharp'
    train_data = pairwise_dataset.PairwiseDataset(
        blur_image_list=str(Path(base_dir).joinpath('train_blur_images.txt')),
        sharp_image_list=str(Path(base_dir).joinpath('train_sharp_images.txt')),
        root=base_dir)
    train_data = chainer.datasets.TransformDataset(train_data,
                                                   transform.Transform())
    test_data = pairwise_dataset.PairwiseDataset(
        blur_image_list=str(Path(base_dir).joinpath('test_blur_images.txt')),
        sharp_image_list=str(Path(base_dir).joinpath('test_sharp_images.txt')),
        root=base_dir)
    # Normally the test set would not be transformed, but otherwise the
    # resolution would differ from the training data.
    test_data = chainer.datasets.TransformDataset(test_data,
                                                  transform.Transform())

    train_iter = chainer.iterators.SerialIterator(train_data, args.batchsize)
    test_iter = chainer.iterators.SerialIterator(test_data, args.batchsize,
                                                 repeat=False, shuffle=False)

    # Set up a trainer
    updater = training.StandardUpdater(train_iter, optimizer, device=args.gpu)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)
    trainer.extend(extensions.FailOnNonNumber())

    # Evaluate the model with the test dataset for each epoch
    eval_trigger = (1, 'epoch')
    trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu),
                   trigger=eval_trigger)

    # Drop the learning rate by lr_drop_ratio at 50% and 75% of the total epochs.
    lr_drop_epoch = [int(args.epoch * 0.5), int(args.epoch * 0.75)]
    lr_drop_ratio = 0.1
    print('lr schedule: {}, timing: {}'.format(lr_drop_ratio, lr_drop_epoch))

    def lr_drop(trainer):
        trainer.updater.get_optimizer('main').lr *= lr_drop_ratio

    trainer.extend(lr_drop,
                   trigger=chainer.training.triggers.ManualScheduleTrigger(
                       lr_drop_epoch, 'epoch'))
    trainer.extend(extensions.observe_lr(), trigger=(1, 'epoch'))

    # Dump a computational graph from 'loss' variable at the first iteration
    # The "main" refers to the target link of the "main" optimizer.
    trainer.extend(extensions.dump_graph('main/loss'))

    # Take a snapshot of the model at each epoch
    trainer.extend(extensions.snapshot_object(model.predictor,
                                              'model_{.updater.epoch}.npz'),
                   trigger=(1, 'epoch'))

    # Write a log of evaluation statistics every 100 iterations
    trainer.extend(extensions.LogReport(trigger=(100, 'iteration')))

    # Print selected entries of the log to stdout
    # Here "main" refers to the target link of the "main" optimizer again, and
    # "validation" refers to the default name of the Evaluator extension.
    # Entries other than 'epoch' are reported by the Classifier link, called by
    # either the updater or the evaluator.
    trainer.extend(extensions.PrintReport([
        'epoch', 'lr', 'main/loss', 'validation/main/loss', 'main/accuracy',
        'validation/main/accuracy', 'elapsed_time'
    ]), trigger=(100, 'iteration'))

    # Print a progress bar to stdout
    trainer.extend(extensions.ProgressBar())

    # interact with chainerui
    trainer.extend(CommandsExtension(), trigger=(100, 'iteration'))

    # save args
    save_args(args, args.out)

    if args.resume:
        # Resume from a snapshot
        chainer.serializers.load_npz(args.resume, trainer)

    # Run the training
    trainer.run()
# Set variables
server = "localhost"
database = "Fifa19"
initial_load = True

### EXTRACT ###
extractor = extract.Extract()
my_data = extractor.query_data(server=server, database=database, table="fifa_19")
df = my_data.copy()

### TRANSFORM ###
transformer = transform.Transform()
df = transformer.transform_data(df)

### STAR SCHEMA ###
schimera = star_schema.Star_Schema()
player_dim = schimera.apply_player_star_schema(df)
club_dim = schimera.apply_club_star_schema(df)
stats_dim = schimera.apply_stats_star_schema(df)
form_dim = schimera.apply_form_star_schema(df)
fifa_fact = schimera.create_fact(df)

### LOAD ###
loader = load.Load()
def main():
    # parse arguments
    args = parse_args()

    # window details
    width = args.window_size[0]
    height = args.window_size[1]
    display = (width, height)

    # window setup
    pygame.init()
    pygame.display.set_caption('Spout Neural Style Sender/Receiver')
    pygame.display.set_mode(display, DOUBLEBUF | OPENGL)

    # OpenGL init
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    glOrtho(0, width, height, 0, 1, -1)
    glMatrixMode(GL_MODELVIEW)
    glDisable(GL_DEPTH_TEST)
    glClearColor(0.0, 0.0, 0.0, 0.0)
    glEnable(GL_TEXTURE_2D)

    # init spout receiver
    receiverName = args.spout_name
    spoutReceiverWidth = args.spout_size[0]
    spoutReceiverHeight = args.spout_size[1]
    # create spout receiver
    spoutReceiver = SpoutSDK.SpoutReceiver()
    # name, width, height, use active sender
    spoutReceiver.pyCreateReceiver(receiverName, spoutReceiverWidth, spoutReceiverHeight, False)

    # init spout sender
    spoutSender = SpoutSDK.SpoutSender()
    spoutSenderWidth = args.spout_size[0]
    spoutSenderHeight = args.spout_size[1]
    spoutSender.CreateSender('Neural Style Sender', spoutSenderWidth, spoutSenderHeight, 0)

    # create textures for spout receiver and spout sender
    textureReceiveID = glGenTextures(1)
    textureStyleID = glGenTextures(1)

    # initialise receiver texture
    glBindTexture(GL_TEXTURE_2D, textureReceiveID)
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)

    # copy data into texture
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, spoutReceiverWidth, spoutReceiverHeight, 0,
                 GL_RGBA, GL_UNSIGNED_BYTE, None)
    glBindTexture(GL_TEXTURE_2D, 0)

    # open tf session
    soft_config = tf.ConfigProto(allow_soft_placement=True)
    soft_config.gpu_options.allow_growth = True  # to deal with large image
    sess = tf.Session(config=soft_config)

    # build tf graph
    style = tf.placeholder(tf.float32, shape=[spoutSenderHeight, spoutSenderWidth, 3], name='input')
    styleI = tf.expand_dims(style, 0)  # add one dim for batch

    # result image from transform-net
    scaler = transform.Transform()
    y_hat = scaler.net(styleI / 255.0)
    y_hat = tf.squeeze(y_hat)  # remove one dim for batch
    y_hat = tf.clip_by_value(y_hat, 0., 255.)

    # initialize parameters
    sess.run(tf.global_variables_initializer())

    # load pre-trained model
    saver = tf.train.Saver()
    saver.restore(sess, args.style_model)

    # loop for graph frame by frame
    while (True):
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                spoutReceiver.ReleaseReceiver()
                pygame.quit()
                quit()

        # receive texture
        spoutReceiver.pyReceiveTexture(receiverName, spoutReceiverWidth, spoutReceiverHeight,
                                       textureReceiveID, GL_TEXTURE_2D, False, 0)

        glBindTexture(GL_TEXTURE_2D, textureReceiveID)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)

        # copy pixel byte array from received texture
        data = glGetTexImage(GL_TEXTURE_2D, 0, GL_RGB, GL_UNSIGNED_BYTE, outputType=None)  # using GL_RGB; could use GL_RGBA
        glBindTexture(GL_TEXTURE_2D, 0)

        # swap width and height data around due to oddness with glGetTexImage:
        # http://permalink.gmane.org/gmane.comp.python.opengl.user/2423
        data.shape = (data.shape[1], data.shape[0], data.shape[2])

        # start time of the loop for FPS counter
        start_time = time.time()

        # run the graph
        output = sess.run(y_hat, feed_dict={style: data})

        # fiddle back to an image we can display. I *think* this is correct
        output = np.clip(output, 0.0, 255.0)
        output = output.astype(np.uint8)

        # setup the texture so we can load the stylised output into it
        glBindTexture(GL_TEXTURE_2D, textureStyleID)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)

        # copy output into texture
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, spoutSenderWidth, spoutSenderHeight, 0,
                     GL_RGB, GL_UNSIGNED_BYTE, output)

        # setup window to draw to screen
        glActiveTexture(GL_TEXTURE0)

        # clean start
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        # reset drawing perspective
        glLoadIdentity()

        # draw texture on screen
        glBegin(GL_QUADS)
        glTexCoord(0, 0)
        glVertex2f(0, 0)
        glTexCoord(1, 0)
        glVertex2f(spoutSenderWidth, 0)
        glTexCoord(1, 1)
        glVertex2f(spoutSenderWidth, spoutSenderHeight)
        glTexCoord(0, 1)
        glVertex2f(0, spoutSenderHeight)
        glEnd()

        # update window
        pygame.display.flip()

        # send texture to spout
        spoutSender.SendTexture(textureStyleID, GL_TEXTURE_2D, spoutSenderWidth,
                                spoutSenderHeight, False, 0)

        # FPS = 1 / time to process loop
        print("FPS: ", 1.0 / (time.time() - start_time))
def __init__(self, content_layer_ids, style_layer_ids, content_images,
             style_image, session, net, num_epochs, batch_size, content_weight,
             style_weight, tv_weight, learn_rate, save_path, check_period,
             test_image, max_size):
    self.net = net
    self.sess = session

    # sort layers info
    self.CONTENT_LAYERS = collections.OrderedDict(
        sorted(content_layer_ids.items()))
    self.STYLE_LAYERS = collections.OrderedDict(
        sorted(style_layer_ids.items()))

    # input images
    self.x_list = content_images
    mod = len(content_images) % batch_size
    if not mod:
        mod = 1
    self.x_list = self.x_list[:-mod]
    self.y_s0 = style_image
    self.content_size = len(self.x_list)

    # parameters for optimization
    self.num_epochs = num_epochs
    self.content_weight = content_weight
    self.style_weight = style_weight
    self.tv_weight = tv_weight
    self.learn_rate = learn_rate
    self.batch_size = batch_size
    self.check_period = check_period

    # path for model to be saved
    self.save_path = save_path

    # image transform network
    self.transform = transform.Transform()
    self.tester = transform.Transform('test')

    # build graph for style transfer
    self._build_graph()

    # test during training
    if test_image is not None:
        self.TEST = True

        # load content image
        self.test_image = utils.load_image(test_image, max_size=max_size)

        # build graph
        self.x_test = tf.placeholder(tf.float32, shape=self.test_image.shape,
                                     name='test_input')
        self.xi_test = tf.expand_dims(self.x_test, 0)  # add one dim for batch

        # result image from transform-net; build the graph for training first,
        # since tester.net reuses its variables.
        self.y_hat_test = self.tester.net(self.xi_test / 255.0)
    else:
        self.TEST = False
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with lewd. If not, see <http://www.gnu.org/licenses/>.

    See the file COPYING, included in this distribution,
    for details about the copyright.
"""

import asyncore
import os, socket, json
import sys

sys.path.append('..')

import led, transform

transform = transform.Transform(12, 10)


class LEDConnection(asyncore.dispatcher_with_send):

    def __init__(self, conn, sock, addr):
        asyncore.dispatcher_with_send.__init__(self, sock)
        self.data = ''

    def handle_read(self):
        data = self.recv(12*10*3)
        self.data += data
        if len(self.data) < 12*10*3:
            return
        screen.push_data(self.data[:12*10*3])
        self.data = self.data[12*10*3:]
import util, transform

ut = util.Util()
tr = transform.Transform()


class Marching_Cubes_2d():

    def check_threshold(self, pts4, threshold):
        pts4_tf = []
        count = 0
        for i in range(4):
            if pts4[i] < threshold:
                pts4_tf.append(0)
            else:
                pts4_tf.append(1)
                count += 1
        return pts4_tf, count

    def line_one(self, p0, p1, p3):
        vector_s = tr.vector_amp(tr.vector_2pt(p0, p1), 0.5)
        vector_e = tr.vector_amp(tr.vector_2pt(p0, p3), 0.5)
        point_s = tr.point_move(p0, vector_s)
        point_e = tr.point_move(p0, vector_e)
        return [point_s, point_e]
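# A minimal usage sketch for check_threshold above. The corner values and the
# threshold are made-up illustrative numbers; only the class defined above is
# assumed.
mc = Marching_Cubes_2d()
corners = [0.2, 0.8, 0.5, 0.1]  # scalar field sampled at the 4 cell corners
pts4_tf, count = mc.check_threshold(corners, threshold=0.5)
print(pts4_tf, count)  # -> [0, 1, 1, 0] 2  (two corners at or above threshold)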
# Character entity
characters_data = ext_characters.api_data(log)
characters_list = transform.extract_data('character', characters_data, 'name',
                                         'gender', 'url', log)
denorm_character_data_1 = transform.denormalizing_data_list(
    'Characters', characters_data, 'titles', 'name', 'gender', 'playedBy',
    'url', log)
denorm_character_data_2 = transform.denormalizing_data_list(
    'Characters', denorm_character_data_1, 'playedBy', 'name', 'gender',
    'titles', 'url', log)
denorm_character_data = transform.dict_to_list(denorm_character_data_2)

trf_char = transform.Transform(denorm_character_data)

character_actor_dictionary = dict(trf_char.key_values(3))
#print(character_actor_dictionary)
character_to_actor = trf_char.relate_entities(character_actor_dictionary, 3)
#print(character_to_actor)

character_title_dictionary = dict(trf_char.key_values(2))
character_to_title = trf_char.relate_entities(character_title_dictionary, 2)

character_load = load_data.Load(cur, conn, log)
character_load.load_three_fields('characters', 'character_name', 'gender',
                                 'character_id', characters_list)
character_load.load_two_fields('actors', 'actor_name', 'actor_id',
                               trf_char.key_values(3))
character_load.load_two_fields('character_to_actor', 'character_id',
                               'actor_id', character_to_actor)
character_load.load_two_fields('titles', 'title', 'title_id',